blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1be151562d01c426c047d45f4208698afadcc1d1 | 1e48d99606711d4f938f42c9d966d8b2adf36efa | /send_email/models.py | 2b01a9d54bf55b5f209b2197c48e5a1989a17e49 | [] | no_license | alastairwp/yourvoteapp | f59c399b07a207733899c067fa5dd6a137133d1a | 9124affcc3fbcc4e361d4259b4915552fb4c87e8 | refs/heads/master | 2022-11-13T10:07:11.511929 | 2019-12-10T15:57:15 | 2019-12-10T15:57:15 | 213,521,901 | 0 | 0 | null | 2022-11-04T19:30:55 | 2019-10-08T01:32:06 | Python | UTF-8 | Python | false | false | 2,744 | py | from django import template
from django.conf import settings
from django.core.mail import send_mail, EmailMultiAlternatives
from django.db import models
from django.template import Context
class EmailTemplate(models.Model):
    """A database-stored e-mail template rendered with Django's template
    engine and sent either as plain text or as an HTML multipart message.

    Rows are looked up by the unique ``template_key`` and rendered with a
    caller-supplied context dict via :meth:`EmailTemplate.send`.
    """
    subject = models.CharField(max_length=255, blank=True, null=True)
    to_email = models.CharField(max_length=255, blank=True, null=True)
    from_email = models.CharField(max_length=255, blank=True, null=True)
    html_template = models.TextField(blank=True, null=True)
    plain_text = models.TextField(blank=True, null=True)
    is_html = models.BooleanField(default=False)
    is_text = models.BooleanField(default=False)
    # Unique lookup key used by _send() to locate the template row.
    template_key = models.CharField(max_length=255, unique=True, null=False, blank=False, default='')
    def get_rendered_template(self, tpl, context):
        # Render a raw template string against the given template Context.
        return self.get_template(tpl).render(context)
    def get_template(self, tpl):
        # Compile a raw string into a django.template.Template.
        return template.Template(tpl)
    def get_subject(self, subject, context):
        # An explicitly passed subject wins; otherwise render the stored one.
        return subject or self.get_rendered_template(self.subject, context)
    def get_body(self, body, context):
        # An explicitly passed body wins; otherwise render plain/HTML field.
        return body or self.get_rendered_template(self._get_body(), context)
    def get_sender(self):
        # Fall back to the project-wide default sender address.
        return self.from_email or settings.DEFAULT_FROM_EMAIL
    def get_recipient(self, emails, context):
        # Recipients default to a single rendered to_email address.
        return emails or [self.get_rendered_template(self.to_email, context)]
    @staticmethod
    def send(*args, **kwargs):
        """Public entry point; see :meth:`_send` for the argument list."""
        EmailTemplate._send(*args, **kwargs)
    @staticmethod
    def _send(template_key, context, subject=None, body=None, sender=None,
              emails=None, bcc=None, attachments=None):
        """Render the template named ``template_key`` and send the e-mail.

        ``attachments`` is an iterable of (name, content, mimetype) triples.
        Raises ``EmailTemplate.DoesNotExist`` when the key is unknown.
        Errors during sending are swallowed unless ``settings.DEBUG`` is on.
        """
        mail_template = EmailTemplate.objects.get(template_key=template_key)
        context = Context(context)
        subject = mail_template.get_subject(subject, context)
        body = mail_template.get_body(body, context)
        sender = sender or mail_template.get_sender()
        emails = mail_template.get_recipient(emails, context)
        if mail_template.is_text:
            # Plain-text mail path.
            return send_mail(subject, body, sender, emails, fail_silently=not
                             settings.DEBUG)
        # HTML mail: the same body is also attached as the text/html part.
        msg = EmailMultiAlternatives(subject, body, sender, emails,
                                     alternatives=((body, 'text/html'),),
                                     bcc=bcc
                                     )
        if attachments:
            for name, content, mimetype in attachments:
                msg.attach(name, content, mimetype)
        return msg.send(fail_silently=not settings.DEBUG)
    def _get_body(self):
        # is_text selects the plain-text variant, otherwise the HTML one.
        if self.is_text:
            return self.plain_text
        return self.html_template
    def __str__(self):
        return "<{}> {}".format(self.template_key, self.subject)
"alastairwp@gmail.com"
] | alastairwp@gmail.com |
05a2d22595769aabb8ba1288219cbc5896aff69b | 837fcd0d7e40de15f52c73054709bd40264273d2 | /practices_loop-master/sum_user_quit.py | 7d4bd070a2e7a364a41b6719421b8247f5090e2f | [] | no_license | NEHAISRANI/Python_Programs | dee9e05ac174a4fd4dd3ae5e96079e10205e18f9 | aa108a56a0b357ca43129e59377ac35609919667 | refs/heads/master | 2020-11-25T07:20:00.484973 | 2020-03-08T12:17:39 | 2020-03-08T12:17:39 | 228,554,399 | 0 | 1 | null | 2020-10-01T06:41:20 | 2019-12-17T07:04:31 | Python | UTF-8 | Python | false | false | 333 | py | #In this program if user input 4 then sum all numbers from starting to ending. if user input quit then program exit"
user=raw_input("enter your number")
index=1
var1=0
while index<=user:
if user=="quit":
break
user=int(user)
if index<=user:
var1=var1+index
index=index+1
if var1!=0:
print var1
| [
"nehai18@navgurukul.org"
] | nehai18@navgurukul.org |
f1d4d811e589b5dcd06b4f29a6ceb315157fd32c | 7e0033782d89f435a2f209be90e2baf471fa0925 | /pd001/src/boletin/migrations/0001_initial.py | c48f6ece915f70c458d694500a2b27c6e45a23e8 | [] | no_license | AnthonyJe/boletin-chalco | 23b3d2c678dd59b07bacc9574e9a38e0285eb18c | f02fbb85911dd1a96a0c5d818f461d395898b9b5 | refs/heads/master | 2022-12-10T05:34:12.429904 | 2017-01-12T13:29:41 | 2017-01-12T13:29:41 | 78,716,365 | 0 | 0 | null | 2022-11-29T14:08:49 | 2017-01-12T06:37:14 | Python | UTF-8 | Python | false | false | 714 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-12 02:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.5: creates the initial "Registrado"
    # table (name, e-mail, creation timestamp). Generated migrations are
    # normally left unedited so the migration history stays reproducible.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Registrado',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(blank=True, max_length=100, null=True)),
                ('email', models.EmailField(max_length=254)),
                # auto_now_add: stamped once when the row is first created.
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"jesus_omega97@hotmail.com"
] | jesus_omega97@hotmail.com |
09949cbad4812a9be9a7484fec23b89a330aae58 | 41c41815e6c52a64a6b25c7805639ba7eda5b28b | /Beginner/Day 2: Tip Calculator.py | a3a19a84187009af90c9ced8dc0f5d5914da92b9 | [] | no_license | JuliaDer/100-Days-of-Python | c640cbb3c445fdc185eb29cb57f4605dc6ddc030 | 2f434f76f894d3c11163f021142f4c3dcb16fffd | refs/heads/main | 2023-04-06T21:32:12.075816 | 2021-04-05T19:37:37 | 2021-04-05T19:37:37 | 328,598,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # If the bill was $150.00, split between 5 people, with 12% tip.
# Format the result to 2 decimal places = 33.60
print("Welcome to the tip calculator")
# Gather the three inputs needed to split the bill.
total = float(input("What was the total bill? $"))
tip_percent = int(input("What percentage tip would you like to give? 10, 12 or 15? "))
num_people = int(input("How many people to split the bill? "))
# Add the gratuity, divide evenly, and round to whole cents.
gratuity = total * (tip_percent / 100)
per_person = (total + gratuity) / num_people
final_amount = round(per_person, 2)
print(f"Each person should pay: ${final_amount}")
| [
"julia.dernovska@gmail.com"
] | julia.dernovska@gmail.com |
473bc8b122476c58effa1fed8f46db5139019970 | f29a25b12fd5c8d8325bda41c6ae4482b2e9c287 | /python/draw_action.py | 2734a0e8faf22d041f3eafe7de5008bf9670b2ef | [
"MIT"
] | permissive | mattruzzi/Pathfind3r | bc7be2db5182832e3fcc90c116e8e762b6ee5351 | 2b65b09030c87465137ccd9b4da991f087dee5bc | refs/heads/master | 2023-06-24T20:09:12.102771 | 2021-07-27T04:02:16 | 2021-07-27T04:02:16 | 282,509,454 | 1 | 0 | null | 2020-07-25T19:15:13 | 2020-07-25T19:15:13 | null | UTF-8 | Python | false | false | 1,300 | py | #!/usr/bin/python
# MIT License
#
# Copyright (c) [2016] [Okan Ulas Gezeroglu]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class DrawAction:
    """One step of a plotter drawing: an action type plus target coords."""

    # Action type codes consumed by the drawing driver.
    PEN_UP = 1
    PEN_DOWN = 2
    PEN_MOVE = 3

    def __init__(self, t, x=0, y=0):
        # Coordinates default to the origin for actions (pen up/down)
        # that do not need a position.
        self.t, self.x, self.y = t, x, y
| [
"okan@peakgames.net"
] | okan@peakgames.net |
151db284810e59ba5fc2db2861b18b0362d9067a | 35d581fa04352504e9b6c3120d8af620e9a6971a | /src/benchmark/chart/exceptions.py | 74dd9369c8aa27124ecb4fffc232a1995b6c99bc | [] | no_license | prosoftwaredev/python-pandas-jupyter-numpy- | 5514534649bf25db45c98c2627b5164c291f21f2 | c2e99e45879432d5c27924b3f205cb4ad6339020 | refs/heads/master | 2020-03-23T20:39:23.922770 | 2018-07-23T19:02:35 | 2018-07-23T19:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py |
class FilterParamsNotSupported(Exception):
    """Raised when the user tries to filter by parameters that are not supported."""
| [
"final.fantasy.dev@gmail.com"
] | final.fantasy.dev@gmail.com |
e8c6a7022c8dd800947f80f28cce13496e92fa02 | acdae9c6fbe29a81e5264347845aa4e41b42cc8b | /youtube.py | 33fd076bd39dbc7d8f00e5dbf5d65d475191b7b4 | [] | no_license | nejni-marji/nenmaj_bot | 49c9abe20a4070fe4aa88c90a37c3fc93f5854c4 | a9bdbf98f0d5036056425e5a514f0adc608f0188 | refs/heads/master | 2021-01-19T02:07:07.376554 | 2017-09-04T01:58:39 | 2017-09-04T01:58:39 | 87,262,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,258 | py | #!/usr/bin/env python3
import re
from os.path import dirname
from glob import glob
from subprocess import call
from os.path import getsize
import telegram as tg
import telegram.ext as tg_ext
import youtube_dl
from bin.background import background
myself = int(open(dirname(__file__) + '/private/myself').read())
yt_dir = dirname(__file__) + '/private/youtube/'
class YouTube():
    """One download/convert/upload job for a Telegram request.

    The whole pipeline runs inside __init__: parse the video id, report the
    request to the bot owner, then (depending on ``mode``) fetch the video
    with youtube-dl, optionally re-encode it with ffmpeg, and upload the
    result back to the chat. Any failure is caught and reported as
    "Failed" in the status message.
    """
    def __init__(self, bot, ids, first_name, args, mode, debug = False):
        # head
        self.bot = bot
        self.user_id, self.chat_id, self.message_id = ids
        self.first_name = first_name
        self.mode = mode
        self.debug = debug
        # Bail out silently if no valid 11-char video id was supplied.
        if self.get_video_id(args) == False:
            return None
        self.send_report()
        self.init_info()
        # body -- each mode is a fixed sequence of pipeline steps.
        try:
            if mode == 'audio':
                self.get_video('140', '.m4a')
                self.get_prefix('RAW')
                self.get_mp3('-unedited.mp3')
                self.get_size('-unedited.mp3')
                self.send_audio('-unedited.mp3')
            if mode == 'video':
                self.get_video('18', '.mp4')
                self.get_prefix('RAW')
                #self.get_mp3('.m4a')
                self.get_size('.mp4')
                self.send_video('.mp4')
            if mode == 'nightcore':
                self.get_video('140', '.m4a')
                self.get_prefix('NC')
                self.get_nightcore('-nightcore.mp3')
                self.get_size('-nightcore.mp3')
                self.send_audio('-nightcore.mp3')
            if mode == 'daycore':
                self.get_video('140', '.m4a')
                self.get_prefix('DC')
                self.get_daycore('-daycore.mp3')
                self.get_size('-daycore.mp3')
                self.send_audio('-daycore.mp3')
            # Debug runs skip forwarding the upload to the public channel.
            if not self.debug:
                self.send_channel()
            self.send_info('Finished', [self.file_name, self.size])
        except:
            self.send_info('Failed', [self.video_id])
    def get_video_id(self, args):
        # Accept a bare id, a youtu.be/ link, or a ?v= link; store the id.
        pat = '((?<=v=)|(?<=youtu\.be\/)|^)[0-9A-Za-z-_]{11}$'
        try:
            self.video_id = re.search(pat, args[0], re.I).group()
            return True
        except:
            return False
    def send_report(self):
        # Notify the bot owner (``myself``) who requested what.
        self.bot.send_message(myself, '\n'.join([
            '#ALERT',
            '#{}'.format(self.mode.upper()),
            '*{} ({}) is trying to use your bot to {} a video!*'.format(
                self.first_name,
                self.user_id,
                {
                    'audio': 'download the audio from',
                    'video': 'download',
                    'nightcore': 'make a nightcore of',
                    'daycore': 'make a daycore of',
                }[self.mode]
            ),
            'The video is: youtu.be/{}'.format(self.video_id),
        ]))
    def send_channel(self):
        # Forward the finished upload to the public channel.
        self.bot.forward_message(
            chat_id = '@nenmaj_cravi',
            from_chat_id = self.chat_id,
            message_id = self.upload_id,
        )
    def init_info(self):
        # Post the status message that send_info() will keep editing.
        self.info_id = self.bot.send_message(self.chat_id,
            'Starting process...',
            reply_to_message_id = self.message_id,
        ).message_id
    def send_info(self, label, data):
        # Update the status message with the current pipeline stage.
        text = '{}:\n{}'.format(label, '\n'.join(data))
        self.bot.edit_message_text(
            chat_id = self.chat_id,
            message_id = self.info_id,
            text = text
        )
    def ydl(self, video_format):
        # youtube-dl options: requested format, sanitized output names.
        ydl_opts = {
            'format': video_format,
            'outtmpl': yt_dir + '%(title)s-%(id)s.%(ext)s',
            'restrictfilenames': True,
        }
        # download video
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([self.video_id])
    def get_size(self, ext):
        # Human-readable size of the produced file, e.g. "4.20 MB".
        # NOTE(review): for files under 1024 bytes ``prefix`` is never
        # assigned, so the final format line raises (caught by __init__'s
        # blanket except) -- confirm whether that path can occur.
        size = getsize(yt_dir + self.file_name + ext)
        if size >= 1024:
            size = size/1024
            prefix = 'K'
        if size >= 1024:
            size = size/1024
            prefix = 'M'
        if size >= 1024:
            size = size/1024
            prefix = 'G'
        self.size = '{0:.2f}'.format(size) + ' {}B'.format(prefix)
    def get_prefix(self, prefix):
        # Label used as the uploaded audio's title, e.g. "NC:<name>".
        self.prefixed = '{}:{}'.format(prefix, self.file_name)
    def get_video(self, num, ext):
        # Download format ``num`` unless a matching file is already cached,
        # then derive self.file_name (basename without its extension).
        self.send_info('Downloading', [self.video_id])
        path = glob('{}*-{}{}'.format(yt_dir, self.video_id, ext))
        if not path: # call ydl
            self.ydl(num)
        self.file_name = glob('{}*-{}{}'.format(yt_dir, self.video_id, ext))
        self.file_name = self.file_name[0].split('/')
        self.file_name.reverse()
        self.file_name = self.file_name[0][:-4]
    def get_mp3(self, ext):
        # Re-encode the downloaded .m4a to mp3 via ffmpeg (-n: no overwrite).
        self.send_info('Converting to mp3', [self.file_name])
        run = 'ffmpeg -i {}.m4a '.format(yt_dir + self.file_name)
        run += '-n -loglevel error '
        run += '{}{}'.format(yt_dir + self.file_name, ext)
        call(run, shell = True)
    def get_nightcore(self, ext):
        # Speed/pitch the audio *up* (rate x1.15, tempo x1.05).
        self.send_info('Nightcoring', [self.file_name])
        run = 'ffmpeg -i {}.m4a '.format(yt_dir + self.file_name)
        run += '-n -loglevel error '
        run += '-af "asetrate=44100*1.15,atempo=1.05" '
        run += '{}{}'.format(yt_dir + self.file_name, ext)
        call(run, shell = True)
    def get_daycore(self, ext):
        # Slow/pitch the audio *down* (rate x0.85, tempo x0.95).
        self.send_info('Daycoring', [self.file_name])
        run = 'ffmpeg -i {}.m4a '.format(yt_dir + self.file_name)
        run += '-n -loglevel error '
        run += '-af "asetrate=44100*0.85,atempo=0.95" '
        run += '{}{}'.format(yt_dir + self.file_name, ext)
        call(run, shell = True)
    def send_audio(self, ext):
        # Upload the audio file; remember the message id for forwarding.
        self.send_info('Sending audio', [self.file_name, self.size])
        self.upload_id = self.bot.send_audio(self.chat_id,
            open(yt_dir + self.file_name + ext, 'rb'),
            title = self.prefixed + ext
        ).message_id
    def send_video(self, ext):
        # Upload the video file; remember the message id for forwarding.
        self.send_info('Sending video', [self.file_name, self.size])
        self.upload_id = self.bot.send_video(self.chat_id,
            open(yt_dir + self.file_name + ext, 'rb'),
            caption = self.file_name + ext
        ).message_id
@background
def youtube_meta(bot, update, args, mode, debug = False):
    """Collect the requester's identifiers and hand the job to YouTube()."""
    message = update.message
    requester = message.from_user
    identifiers = [requester.id, message.chat_id, message.message_id]
    YouTube(bot, identifiers, requester.first_name, args, mode, debug = debug)
def youtube_audio(bot, update, args):
    """Telegram /audio handler: download only the audio track."""
    youtube_meta(bot, update, args, mode='audio')
def youtube_video(bot, update, args):
    """Telegram /video handler: download the full video."""
    youtube_meta(bot, update, args, mode='video')
def youtube_nightcore(bot, update, args):
    """Telegram /nightcore handler: sped-up, higher-pitched audio."""
    youtube_meta(bot, update, args, mode='nightcore')
def youtube_daycore(bot, update, args):
    """Telegram /daycore handler: slowed-down, lower-pitched audio."""
    youtube_meta(bot, update, args, mode='daycore')
def youtube_debug(bot, update, args):
    """Telegram /debug handler: first arg is the mode; skips channel forward."""
    # args.pop(0) mutates the list before it is passed on, exactly as the
    # two-statement original did.
    youtube_meta(bot, update, args, args.pop(0), debug=True)
def main(dp, group):
    """Register every /command handler on the dispatcher under ``group``."""
    commands = (
        ('audio', youtube_audio),
        ('video', youtube_video),
        ('nightcore', youtube_nightcore),
        ('daycore', youtube_daycore),
        ('debug', youtube_debug),
    )
    for name, callback in commands:
        handler = tg_ext.CommandHandler(name, callback, pass_args=True)
        dp.add_handler(handler, group)
| [
"nejni.marji@gmail.com"
] | nejni.marji@gmail.com |
b98c8338f9919c87f47ead364dc8f8712f9e553d | 8174ebfd09d6ad104be52d1c3bee7f6b1b02e2b6 | /PCProject/MachineLearningEarlyStage/WAVtoCSV.py | 2d6df878d1cc05dadb88a029ce0b74d9ded62f6a | [] | no_license | alexdavies325/AudioClassificationMicrocontroller | b31f951598cf860d7c157216a006c1ff6fb8ed88 | 586008a33f307ef0f32f6118f820b5a0f979b840 | refs/heads/master | 2020-12-22T13:09:24.778931 | 2020-01-28T19:10:22 | 2020-01-28T19:10:22 | 236,790,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | import os
import pandas as pd
path = '/home/alex/Alex/QMUL/Project2/Machine learning/Wavefiles'
os.chdir(path)
dict1= {'filename' : [], 'label' : []}
for filename in os.listdir(path):
with open(filename) as f:
stringSplit = filename.split("B")
stringName = stringSplit[0]
print(stringName)
dict1['filename'].append(filename)
dict1['label'].append(stringName)
##pathSplit = path.split("/")
##print(pathSplit)
##path = ''
##print(len(pathSplit)-1)
##
##for i in range(len(pathSplit)-1):
## print(i)
## path = path + '/' + pathSplit[i]
##
##
##for i in path:
##
##
for i in range(1, len(path)+1):
if path[-i] == '/':
path = path[:-i]
break
os.chdir(path)
df = pd.DataFrame(dict1)
df.to_csv('materials.csv', index = False)
| [
"alexdavies325@gmail.com"
] | alexdavies325@gmail.com |
31c447acd3b746744f3991cbcdac3ee2c59fc519 | d8a027585fd6f199534d58a546031c3b6984f83d | /dekilbot.py | f1388e1f2b180b371154a61191f5ee5db60aba4b | [] | no_license | dekilgrind/cokdeh | 9d5d7dac1de38333a1b974770efc609c85ebda20 | 1325a091d007be07c4ef12be690c89872e862d54 | refs/heads/master | 2020-04-15T22:43:05.363773 | 2019-01-11T12:27:59 | 2019-01-11T12:27:59 | 165,083,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206,375 | py | # -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
from gtts import gTTS
from googletrans import Translator
#==============================================================================#
botStart = time.time()  # process start time, used for runtime reporting

# Primary account ("nadya") plus four helper accounts. LINE() with no
# arguments runs the interactive login flow; the commented-out variants
# show token- and credential-based logins.
nadya = LINE()
#nadya = LINE("TOKEN KAMU")
#nadya = LINE("Email","Password")
nadya.log("Auth Token : " + str(nadya.authToken))
channelToken = nadya.getChannelResult()
nadya.log("Channel Token : " + str(channelToken))

ki = LINE()
#ki = LINE("TOKEN KAMU")
#ki = LINE("Email","Password")
ki.log("Auth Token : " + str(ki.authToken))
channelToken = ki.getChannelResult()
ki.log("Channel Token : " + str(channelToken))

ki2 = LINE()
#ki2 = LINE("TOKEN KAMU")
#ki2 = LINE("Email","Password")
ki2.log("Auth Token : " + str(ki2.authToken))
channelToken = ki2.getChannelResult()
ki2.log("Channel Token : " + str(channelToken))

ki3 = LINE()
#ki3 = LINE("TOKEN KAMU")
#ki3 = LINE("Email","Password")
ki3.log("Auth Token : " + str(ki3.authToken))
channelToken = ki3.getChannelResult()
ki3.log("Channel Token : " + str(channelToken))

ki4 = LINE()
#ki4 = LINE("TOKEN KAMU")
#ki4 = LINE("Email","Password")
ki4.log("Auth Token : " + str(ki4.authToken))
channelToken = ki4.getChannelResult()
ki4.log("Channel Token : " + str(channelToken))

# All bot clients and their mids, used for broadcast-style commands.
KAC = [nadya,ki,ki2,ki3,ki4]
nadyaMID = nadya.profile.mid
kiMID = ki.profile.mid
ki2MID = ki2.profile.mid
ki3MID = ki3.profile.mid
ki4MID = ki4.profile.mid
Bots = [nadyaMID,kiMID,ki2MID,ki3MID,ki4MID]
# Privileged user mids; Owner/admin are re-read from JSON just below.
creator = ["",""]
Owner = [""]
admin = [""]
nadyaProfile = nadya.getProfile()
kiProfile = ki.getProfile()
ki2Profile = ki2.getProfile()
ki3Profile = ki3.getProfile()
ki4Profile = ki4.getProfile()
lineSettings = nadya.getSettings()
kiSettings = ki.getSettings()
ki2Settings = ki2.getSettings()
ki3Settings = ki3.getSettings()
ki4Settings = ki4.getSettings()
# One long-poll operation fetcher per account.
oepoll = OEPoll(nadya)
oepoll1 = OEPoll(ki)
oepoll2 = OEPoll(ki2)
oepoll3 = OEPoll(ki3)
oepoll4 = OEPoll(ki4)
responsename = nadya.getProfile().displayName
responsename2 = ki.getProfile().displayName
responsename3 = ki2.getProfile().displayName
responsename4 = ki3.getProfile().displayName
responsename5 = ki4.getProfile().displayName
#==============================================================================#
with open('Owner.json', 'r') as fp:
    Owner = json.load(fp)
with open('admin.json', 'r') as fp:
    admin = json.load(fp)
# Snapshot of the primary account's profile; used by RestoreProfile.
myProfile = {
    "displayName": "",
    "statusMessage": "",
    "pictureStatus": ""
}
myProfile["displayName"] = nadyaProfile.displayName
myProfile["statusMessage"] = nadyaProfile.statusMessage
myProfile["pictureStatus"] = nadyaProfile.pictureStatus
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
#==============================================================================#
# Persistent state: "read" tracks lurk/read receipts, "settings" holds
# toggles; both are written back by backupData().
read = json.load(readOpen)
settings = json.load(settingsOpen)
#if settings["restartPoint"] != None:
#    nadya.sendMessage(settings["restartPoint"], "Bot kembali aktif")
#    settings["restartBot"] = None
def restartBot():
    """Flush state to disk, then replace this process with a fresh copy."""
    print ("[ INFO ] BOT RESETTED")
    backupData()
    interpreter = sys.executable
    # os.execl never returns: the running image is replaced in place.
    os.execl(interpreter, interpreter, *sys.argv)
def logError(text):
    """Log an error to the LINE console and append it to errorLog.txt."""
    nadya.log("[ ERROR ] " + str(text))
    time_ = datetime.now()
    with open("errorLog.txt","a") as error:
        # Fix: the original formatted str(time) -- the *time module's*
        # repr -- instead of the timestamp captured in time_ above.
        error.write("\n[%s] %s" % (str(time_), text))
def sendMessageWithMention(to, mid):
    """Send "@x " to chat ``to`` with mention metadata targeting ``mid``."""
    try:
        mentionee = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
        metadata = {'MENTION': '{"MENTIONEES":['+mentionee+']}'}
        nadya.sendMessage(to, '@x ', contentMetadata=metadata, contentType=0)
    except Exception as error:
        logError(error)
def helpmessage():
    """Return the box-drawn main "Help Message" menu shown to users."""
    helpMessage = "╔═══════════════" + "\n" + \
        "╠😬 👿👿👿 DEKIL SANGE 👿👿👿 😬" + "\n" + \
        "║" + "\n" + \
        "╠══✪〘 Help Message 〙✪══" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Help 〙✪════════" + "\n" + \
        "╠➥ Help" + "\n" + \
        "╠➥ Translate" + "\n" + \
        "╠➥ Texttospeech" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Protect 〙✪═══════" + "\n" + \
        "╠➥ Protect 「On/Off」" + "\n" + \
        "╠➥ QrProtect 「On/Off」" + "\n" + \
        "╠➥ InviteProtect 「On/Off」" + "\n" + \
        "╠➥ CancelProtect 「On/Off」" + "\n" + \
        "╠➥ SetPro 「On/Off」" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Status 〙✪════════" + "\n" + \
        "╠➥ Restart" + "\n" + \
        "╠➥ Runtime" + "\n" + \
        "╠➥ Speed" + "\n" + \
        "╠➥ Status" + "\n" + \
        "╠➥ About" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Settings 〙✪═══════" + "\n" + \
        "╠➥ AutoAdd「On/Off」" + "\n" + \
        "╠➥ AutoJoin「On/Off」" + "\n" + \
        "╠➥ AutoLeave「On/Off」" + "\n" + \
        "╠➥ AutoRead「On/Off」" + "\n" + \
        "╠➥ CheckSticker「On/Off」" + "\n" + \
        "╠➥ DetectMention「On/Off」" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Self 〙✪═════════" + "\n" + \
        "╠➥ Me" + "\n" + \
        "╠➥ MyMid" + "\n" + \
        "╠➥ MyName" + "\n" + \
        "╠➥ MyBio" + "\n" + \
        "╠➥ MyPicture" + "\n" + \
        "╠➥ MyVideoProfile" + "\n" + \
        "╠➥ MyCover" + "\n" + \
        "╠➥ StealContact「Mention」" + "\n" + \
        "╠➥ StealMid「Mention」" + "\n" + \
        "╠➥ StealName「Mention」" + "\n" + \
        "╠➥ StealBio「Mention」" + "\n" + \
        "╠➥ StealPicture「Mention」" + "\n" + \
        "╠➥ StealVideoProfile「Mention」" + "\n" + \
        "╠➥ StealCover「Mention」" + "\n" + \
        "╠➥ CloneProfile「Mention」" + "\n" + \
        "╠➥ RestoreProfile" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Group 〙✪════════" + "\n" + \
        "╠➥ GroupCreator" + "\n" + \
        "╠➥ GroupId" + "\n" + \
        "╠➥ GroupName" + "\n" + \
        "╠➥ GroupPicture" + "\n" + \
        "╠➥ GroupTicket" + "\n" + \
        "╠➥ GroupTicket「On/Off」" + "\n" + \
        "╠➥ GroupList" + "\n" + \
        "╠➥ GroupMemberList" + "\n" + \
        "╠➥ GroupInfo" + "\n" + \
        "╠➥ Kill「Mention」" + "\n" + \
        "╠➥ KickAllMember"+ "\n" + \
        "║" + "\n" + \
        "╠✪〘 Special 〙✪═══════" + "\n" + \
        "╠➥ Mimic「On/Off」" + "\n" + \
        "╠➥ MimicList" + "\n" + \
        "╠➥ MimicAdd「Mention」" + "\n" + \
        "╠➥ MimicDel「Mention」" + "\n" + \
        "╠➥ Mention" + "\n" + \
        "╠➥ Lurking「Oɴ/Off/Reset」" + "\n" + \
        "╠➥ Lurking" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Media 〙✪════════" + "\n" + \
        "╠➥ Kalender" + "\n" + \
        "╠➥ CheckDate「Date」" + "\n" + \
        "╠➥ InstagramInfo「UserName」" + "\n" + \
        "╠➥ InstagramPost「UserName」" + "\n" + \
        "╠➥ SearchYoutube「Search」" + "\n" + \
        "╠➥ SearchMusic「Search」" + "\n" + \
        "╠➥ SearchLyric「Search」" + "\n" + \
        "╠➥ SearchImage「Search」" + "\n" + \
        "╠➥ ScreenshootWebsite「LinkUrl」" + "\n" + \
        "║" + "\n" + \
        "╠✪〘 Bot 〙✪═════════" + "\n" + \
        "╠➥ AdminAdd" + "\n" + \
        "╠➥ AdminDel" + "\n" + \
        "╠➥ AdminList" + "\n" + \
        "╠➥ OwnerAdd" + "\n" + \
        "╠➥ OwnerDel" + "\n" + \
        "╠➥ OwnerList" + "\n" + \
        "╠➥ BanContact" + "\n" + \
        "╠➥ UnbanContact" + "\n" + \
        "╠➥ BanList" + "\n" + \
        "╠➥ ClearBan" + "\n" + \
        "╠➥ Respon" + "\n" + \
        "╠➥ Absen" + "\n" + \
        "╠➥ JoinAll" + "\n" + \
        "╠➥ ByeAll" + "\n" + \
        "║" + "\n" + \
        "╚═〘 Credits By: ©DEKIL SANGE 2019™ 〙"
    return helpMessage
def helptexttospeech():
    """Return the list of gTTS language codes for the say-<code> command."""
    helpTextToSpeech = "╔══〘 T E X T T O S P E E C H 〙" + "\n" + \
        "╠ af : Afrikaans" + "\n" + \
        "╠ sq : Albanian" + "\n" + \
        "╠ ar : Arabic" + "\n" + \
        "╠ hy : Armenian" + "\n" + \
        "╠ bn : Bengali" + "\n" + \
        "╠ ca : Catalan" + "\n" + \
        "╠ zh : Chinese" + "\n" + \
        "╠ zh-cn : Chinese (Mandarin/China)" + "\n" + \
        "╠ zh-tw : Chinese (Mandarin/Taiwan)" + "\n" + \
        "╠ zh-yue : Chinese (Cantonese)" + "\n" + \
        "╠ hr : Croatian" + "\n" + \
        "╠ cs : Czech" + "\n" + \
        "╠ da : Danish" + "\n" + \
        "╠ nl : Dutch" + "\n" + \
        "╠ en : English" + "\n" + \
        "╠ en-au : English (Australia)" + "\n" + \
        "╠ en-uk : English (United Kingdom)" + "\n" + \
        "╠ en-us : English (United States)" + "\n" + \
        "╠ eo : Esperanto" + "\n" + \
        "╠ fi : Finnish" + "\n" + \
        "╠ fr : French" + "\n" + \
        "╠ de : German" + "\n" + \
        "╠ el : Greek" + "\n" + \
        "╠ hi : Hindi" + "\n" + \
        "╠ hu : Hungarian" + "\n" + \
        "╠ is : Icelandic" + "\n" + \
        "╠ id : Indonesian" + "\n" + \
        "╠ it : Italian" + "\n" + \
        "╠ ja : Japanese" + "\n" + \
        "╠ km : Khmer (Cambodian)" + "\n" + \
        "╠ ko : Korean" + "\n" + \
        "╠ la : Latin" + "\n" + \
        "╠ lv : Latvian" + "\n" + \
        "╠ mk : Macedonian" + "\n" + \
        "╠ no : Norwegian" + "\n" + \
        "╠ pl : Polish" + "\n" + \
        "╠ pt : Portuguese" + "\n" + \
        "╠ ro : Romanian" + "\n" + \
        "╠ ru : Russian" + "\n" + \
        "╠ sr : Serbian" + "\n" + \
        "╠ si : Sinhala" + "\n" + \
        "╠ sk : Slovak" + "\n" + \
        "╠ es : Spanish" + "\n" + \
        "╠ es-es : Spanish (Spain)" + "\n" + \
        "╠ es-us : Spanish (United States)" + "\n" + \
        "╠ sw : Swahili" + "\n" + \
        "╠ sv : Swedish" + "\n" + \
        "╠ ta : Tamil" + "\n" + \
        "╠ th : Thai" + "\n" + \
        "╠ tr : Turkish" + "\n" + \
        "╠ uk : Ukrainian" + "\n" + \
        "╠ vi : Vietnamese" + "\n" + \
        "╠ cy : Welsh" + "\n" + \
        "╚══〘 Jangan Typo 〙" + "\n" + "\n\n" + \
        "Contoh : say-en DEKIL NGACENG"
    return helpTextToSpeech
def helptranslate():
    """Return the list of Google Translate codes for the tr-<code> command."""
    helpTranslate = "╔══〘 T R A N S L A T E 〙" + "\n" + \
        "╠ af : afrikaans" + "\n" + \
        "╠ sq : albanian" + "\n" + \
        "╠ am : amharic" + "\n" + \
        "╠ ar : arabic" + "\n" + \
        "╠ hy : armenian" + "\n" + \
        "╠ az : azerbaijani" + "\n" + \
        "╠ eu : basque" + "\n" + \
        "╠ be : belarusian" + "\n" + \
        "╠ bn : bengali" + "\n" + \
        "╠ bs : bosnian" + "\n" + \
        "╠ bg : bulgarian" + "\n" + \
        "╠ ca : catalan" + "\n" + \
        "╠ ceb : cebuano" + "\n" + \
        "╠ ny : chichewa" + "\n" + \
        "╠ zh-cn : chinese (simplified)" + "\n" + \
        "╠ zh-tw : chinese (traditional)" + "\n" + \
        "╠ co : corsican" + "\n" + \
        "╠ hr : croatian" + "\n" + \
        "╠ cs : czech" + "\n" + \
        "╠ da : danish" + "\n" + \
        "╠ nl : dutch" + "\n" + \
        "╠ en : english" + "\n" + \
        "╠ eo : esperanto" + "\n" + \
        "╠ et : estonian" + "\n" + \
        "╠ tl : filipino" + "\n" + \
        "╠ fi : finnish" + "\n" + \
        "╠ fr : french" + "\n" + \
        "╠ fy : frisian" + "\n" + \
        "╠ gl : galician" + "\n" + \
        "╠ ka : georgian" + "\n" + \
        "╠ de : german" + "\n" + \
        "╠ el : greek" + "\n" + \
        "╠ gu : gujarati" + "\n" + \
        "╠ ht : haitian creole" + "\n" + \
        "╠ ha : hausa" + "\n" + \
        "╠ haw : hawaiian" + "\n" + \
        "╠ iw : hebrew" + "\n" + \
        "╠ hi : hindi" + "\n" + \
        "╠ hmn : hmong" + "\n" + \
        "╠ hu : hungarian" + "\n" + \
        "╠ is : icelandic" + "\n" + \
        "╠ ig : igbo" + "\n" + \
        "╠ id : indonesian" + "\n" + \
        "╠ ga : irish" + "\n" + \
        "╠ it : italian" + "\n" + \
        "╠ ja : japanese" + "\n" + \
        "╠ jw : javanese" + "\n" + \
        "╠ kn : kannada" + "\n" + \
        "╠ kk : kazakh" + "\n" + \
        "╠ km : khmer" + "\n" + \
        "╠ ko : korean" + "\n" + \
        "╠ ku : kurdish (kurmanji)" + "\n" + \
        "╠ ky : kyrgyz" + "\n" + \
        "╠ lo : lao" + "\n" + \
        "╠ la : latin" + "\n" + \
        "╠ lv : latvian" + "\n" + \
        "╠ lt : lithuanian" + "\n" + \
        "╠ lb : luxembourgish" + "\n" + \
        "╠ mk : macedonian" + "\n" + \
        "╠ mg : malagasy" + "\n" + \
        "╠ ms : malay" + "\n" + \
        "╠ ml : malayalam" + "\n" + \
        "╠ mt : maltese" + "\n" + \
        "╠ mi : maori" + "\n" + \
        "╠ mr : marathi" + "\n" + \
        "╠ mn : mongolian" + "\n" + \
        "╠ my : myanmar (burmese)" + "\n" + \
        "╠ ne : nepali" + "\n" + \
        "╠ no : norwegian" + "\n" + \
        "╠ ps : pashto" + "\n" + \
        "╠ fa : persian" + "\n" + \
        "╠ pl : polish" + "\n" + \
        "╠ pt : portuguese" + "\n" + \
        "╠ pa : punjabi" + "\n" + \
        "╠ ro : romanian" + "\n" + \
        "╠ ru : russian" + "\n" + \
        "╠ sm : samoan" + "\n" + \
        "╠ gd : scots gaelic" + "\n" + \
        "╠ sr : serbian" + "\n" + \
        "╠ st : sesotho" + "\n" + \
        "╠ sn : shona" + "\n" + \
        "╠ sd : sindhi" + "\n" + \
        "╠ si : sinhala" + "\n" + \
        "╠ sk : slovak" + "\n" + \
        "╠ sl : slovenian" + "\n" + \
        "╠ so : somali" + "\n" + \
        "╠ es : spanish" + "\n" + \
        "╠ su : sundanese" + "\n" + \
        "╠ sw : swahili" + "\n" + \
        "╠ sv : swedish" + "\n" + \
        "╠ tg : tajik" + "\n" + \
        "╠ ta : tamil" + "\n" + \
        "╠ te : telugu" + "\n" + \
        "╠ th : thai" + "\n" + \
        "╠ tr : turkish" + "\n" + \
        "╠ uk : ukrainian" + "\n" + \
        "╠ ur : urdu" + "\n" + \
        "╠ uz : uzbek" + "\n" + \
        "╠ vi : vietnamese" + "\n" + \
        "╠ cy : welsh" + "\n" + \
        "╠ xh : xhosa" + "\n" + \
        "╠ yi : yiddish" + "\n" + \
        "╠ yo : yoruba" + "\n" + \
        "╠ zu : zulu" + "\n" + \
        "╠ fil : Filipino" + "\n" + \
        "╠ he : Hebrew" + "\n" + \
        "╚══〘 Jangan Typo 〙" + "\n" + "\n\n" + \
        "Contoh : tr-en DEKIL NGACENG"
    return helpTranslate
#==============================================================================#
def backupData():
    """Write the in-memory settings/read dicts back to their JSON files.

    Returns True on success, False (after logging the error) on failure.
    """
    try:
        # Fix: use context managers so both files are flushed and closed;
        # the original left the codecs.open() handles dangling, risking
        # truncated/unflushed state on restart.
        with codecs.open('temp.json','w','utf-8') as f:
            json.dump(settings, f, sort_keys=True, indent=4, ensure_ascii=False)
        with codecs.open('read.json','w','utf-8') as f:
            json.dump(read, f, sort_keys=True, indent=4, ensure_ascii=False)
        return True
    except Exception as error:
        logError(error)
        return False
def command(text):
    """Return the lower-cased command with the key prefix stripped,
    or "Undefined command" when the prefix is absent."""
    pesan = text.lower()
    key = settings["keyCommand"]
    if pesan.startswith(key):
        # Fix: slice off only the leading prefix. The original used
        # str.replace, which also deleted every later occurrence of the
        # prefix inside the command text.
        cmd = pesan[len(key):]
    else:
        cmd = "Undefined command"
    return cmd
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
nadya.sendMessage(op.param1, "Halo {} terimakasih telah menambahkan saya sebagai teman :D".format(str(nadya.getContact(op.param1).displayName)))
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE INTO GROUP")
group = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
if settings["autoJoin"] == True:
if settings["autoReject"]["status"] == True:
if len(group.members) > settings["autoReject"]["members"]:
nadya.acceptGroupInvitation(op.param1)
else:
nadya.rejectGroupInvitation(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
gInviMids = []
for z in group.invitee:
if z.mid in op.param3:
gInviMids.append(z.mid)
listContact = ""
if gInviMids != []:
for j in gInviMids:
name_ = nadya.getContact(j).displayName
listContact += "\n + {}".format(str(name_))
arg = " Group Name : {}".format(str(group.name))
arg += "\n Executor : {}".format(str(contact.displayName))
arg += "\n List User Invited : {}".format(str(listContact))
print (arg)
if op.type == 17:
print ("[ 17 ] NOTIFIED ACCEPT GROUP INVITATION")
group = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
arg = " Group Name : {}".format(str(group.name))
arg += "\n User Join : {}".format(str(contact.displayName))
print (arg)
if op.type == 17:
if op.param2 in admin:
if op.param2 not in Bots:
return
ginfo = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
nadya.sendMessage(c)
nadya.sendText(op.param1,"Hallo " + nadya.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
nadya.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, text=None, contentType=7)
d.contentMetadata={
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
nadya.sendMessage(d)
print ("MEMBER JOIN TO GROUP")
if op.type == 19:
print ("[ 19 ] NOTIFIED KICKOUT FROM GROUP")
group = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
victim = nadya.getContact(op.param3)
arg = " Group Name : {}".format(str(group.name))
arg += "\n Executor : {}".format(str(contact.displayName))
arg += "\n Victim : {}".format(str(victim.displayName))
print (arg)
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
nadya.leaveRoom(op.param1)
#-------------------------------------------------------------------------------
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if settings["wblack"] == True:
if msg.contentMetadata["mid"] in settings["commentBlack"]:
nadya.sendMessage(msg.to,"sudah masuk daftar hitam")
settings["wblack"] = False
else:
settings["commentBlack"][msg.contentMetadata["mid"]] = True
settings["wblack"] = False
nadya.sendMessage(msg.to,"Itu tidak berkomentar")
elif settings["dblack"] == True:
if msg.contentMetadata["mid"] in settings["commentBlack"]:
del settings["commentBlack"][msg.contentMetadata["mid"]]
nadya.sendMessage(msg.to,"Done")
settings["dblack"] = False
else:
settings["dblack"] = False
nadya.sendMessage(msg.to,"Tidak ada dalam daftar hitam")
#-------------------------------------------------------------------------------
elif settings["wblacklist"] == True:
if msg.contentMetadata["mid"] in settings["blacklist"]:
nadya.sendMessage(msg.to,"sudah masuk daftar hitam")
settings["wblacklist"] = False
else:
settings["blacklist"][msg.contentMetadata["mid"]] = True
settings["wblacklist"] = False
nadya.sendMessage(msg.to,"Done")
elif settings["dblacklist"] == True:
if msg.contentMetadata["mid"] in settings["blacklist"]:
del settings["blacklist"][msg.contentMetadata["mid"]]
nadya.sendMessage(msg.to,"Done")
settings["dblacklist"] = False
else:
settings["dblacklist"] = False
nadya.sendMessage(msg.to,"Done")
#-------------------------------------------------------------------------------
if op.type == 26:
print ("[ 26 ] SEND MESSAGE COMMAND")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != nadya.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
if text.lower() == 'help':
helpMessage = helpmessage()
nadya.sendMessage(to, str(helpMessage))
nadya.sendContact(to, "")
elif text.lower() == 'texttospeech':
helpTextToSpeech = helptexttospeech()
nadya.sendMessage(to, str(helpTextToSpeech))
elif text.lower() == 'translate':
helpTranslate = helptranslate()
nadya.sendMessage(to, str(helpTranslate))
#==============================================================================#
elif text.lower() == 'speed':
start = time.time()
nadya.sendMessage(to, "Please Wait...")
elapsed_time = time.time() - start
nadya.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'restart':
if msg._from in Owner:
nadya.sendMessage(to, "Please Wait...")
time.sleep(5)
nadya.sendMessage(to, "Restart Sukses")
restartBot()
elif text.lower() == 'runtime':
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
nadya.sendMessage(to, "Bot sudah berjalan selama {}".format(str(runtime)))
elif text.lower() == 'about':
try:
arr = []
owner = "u14f64e139a3817afaabe27d237afb36b"
creator = nadya.getContact(owner)
contact = nadya.getContact(nadyaMID)
grouplist = nadya.getGroupIdsJoined()
contactlist = nadya.getAllContactIds()
blockedlist = nadya.getBlockedContactIds()
ret_ = "╔══[ About Self ]"
ret_ += "\n╠ Line : {}".format(contact.displayName)
ret_ += "\n╠ Group : {}".format(str(len(grouplist)))
ret_ += "\n╠ Friend : {}".format(str(len(contactlist)))
ret_ += "\n╠ Blocked : {}".format(str(len(blockedlist)))
ret_ += "\n╠══[ About Selfbot ]"
ret_ += "\n╠ Version : Premium"
ret_ += "\n╠ Creator : {}".format(creator.displayName)
ret_ += "\n╚══[ Dilarang Remake Tanpa Ijin :P ]"
nadya.sendMessage(to, str(ret_))
except Exception as e:
nadya.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'status':
try:
ret_ = "╔══[ Status ]"
if settings["protect"] == True: ret_ += "\n╠ Protect ✅"
else: ret_ += "\n╠ Protect ❌"
if settings["qrprotect"] == True: ret_ += "\n╠ Qr Protect ✅"
else: ret_ += "\n╠ Qr Protect ❌"
if settings["inviteprotect"] == True: ret_ += "\n╠ Invite Protect ✅"
else: ret_ += "\n╠ Invite Protect ❌"
if settings["cancelprotect"] == True: ret_ += "\n╠ Cancel Protect ✅"
else: ret_ += "\n╠ Cancel Protect ❌"
if settings["autoAdd"] == True: ret_ += "\n╠ Auto Add ✅"
else: ret_ += "\n╠ Auto Add ❌"
if settings["autoJoin"] == True: ret_ += "\n╠ Auto Join ✅"
else: ret_ += "\n╠ Auto Join ❌"
if settings["autoLeave"] == True: ret_ += "\n╠ Auto Leave ✅"
else: ret_ += "\n╠ Auto Leave ❌"
if settings["autoRead"] == True: ret_ += "\n╠ Auto Read ✅"
else: ret_ += "\n╠ Auto Read ❌"
if settings["checkSticker"] == True: ret_ += "\n╠ Check Sticker ✅"
else: ret_ += "\n╠ Check Sticker ❌"
if settings["detectMention"] == True: ret_ += "\n╠ Detect Mention ✅"
else: ret_ += "\n╠ Detect Mention ❌"
ret_ += "\n╚══[ Status ]"
nadya.sendMessage(to, str(ret_))
except Exception as e:
nadya.sendMessage(msg.to, str(e))
#-------------------------------------------------------------------------------
elif msg.text.lower().startswith("owneradd "):
if msg._from in Owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Owner[target] = True
f=codecs.open('Owner.json','w','utf-8')
json.dump(Owner, f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendMessage(msg.to,"Owner ☢-Bot-☢\nAdd\nExecuted")
except:
pass
else:
nadya.sendMessage(msg.to,"Owner Permission Required")
elif msg.text.lower().startswith("ownerdel "):
if msg._from in Owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del Owner[target]
f=codecs.open('Owner.json','w','utf-8')
json.dump(Owner, f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendMessage(msg.to,"Owner ☢-Bot-☢\nRemove\nExecuted")
except:
pass
else:
nadya.sendMessage(msg.to,"Owner Permission Required")
#-------------------------------------------------------------------------------
elif text.lower() == 'ownerlist':
if msg._from in Owner:
if Owner == []:
nadya.sendMessage(msg.to,"The Ownerlist is empty")
else:
nadya.sendMessage(msg.to,"Tunggu...")
mc = "╔═══════════════\n╠♥ ✿✿✿ NADYA_TJ ✿✿✿ ♥\n╠══✪〘 Owner List 〙✪═══\n"
for mi_d in admin:
mc += "╠✪ " +nadya.getContact(mi_d).displayName + "\n"
nadya.sendMessage(msg.to,mc + "╠═══════════════\n╠✪〘 line.me/ti/p/~nad_nad. 〙\n╚═══════════════")
#-------------------------------------------------------------------------------
elif msg.text.lower().startswith("adminadd "):
if msg._from in Owner:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin[target] = True
f=codecs.open('admin.json','w','utf-8')
json.dump(admin, f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendMessage(msg.to,"Admin ☢-Bot-☢\nAdd\nExecuted")
break
except:
nadya.sendMessage(msg.to,"Added Target Fail !")
break
else:
nadya.sendMessage(msg.to,"Owner Permission Required")
elif msg.text.lower().startswith("admindel "):
if msg._from in Owner:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del admin[target]
f=codecs.open('admin.json','w','utf-8')
json.dump(admin, f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendMessage(msg.to,"Admin ☢-Bot-☢\nRemove\nExecuted")
break
except:
nadya.sendMessage(msg.to,"Deleted Target Fail !")
break
else:
nadya.sendMessage(msg.to,"Owner Permission Required")
#-------------------------------------------------------------------------------
elif text.lower() == 'adminlist':
if msg._from in Owner:
if admin == []:
nadya.sendMessage(msg.to,"The Adminlist is empty")
else:
nadya.sendMessage(msg.to,"Tunggu...")
mc = "╔═══════════════\n╠♥ ✿✿✿ NADYA_TJ ✿✿✿ ♥\n╠══✪〘 Admin List 〙✪═══\n"
for mi_d in admin:
mc += "╠✪ " +nadya.getContact(mi_d).displayName + "\n"
nadya.sendMessage(msg.to,mc + "╠═══════════════\n╠✪〘 line.me/ti/p/~nad_nad. 〙\n╚═══════════════")
#-------------------------------------------------------------------------------
elif text.lower() == 'protect on':
if msg._from in Owner:
if settings["protect"] == True:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Already On")
else:
nadya.sendMessage(msg.to,"➲ Protection Set To On")
else:
settings["protect"] = True
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Set To On")
else:
nadya.sendMessage(msg.to,"➲ Protection Already On")
elif text.lower() == 'protect off':
if msg._from in Owner:
if settings["protect"] == False:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Already Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Set To Off")
else:
settings["protect"] = False
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Set To Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Already Off")
#----------------------------------------------------------------------------------------
elif text.lower() == 'qrprotect on':
if msg._from in Owner:
if settings["qrprotect"] == True:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Qr Already On")
else:
nadya.sendMessage(msg.to,"➲ Protection Qr Set To On")
else:
settings["qrprotect"] = True
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Qr Set To On")
else:
nadya.sendMessage(msg.to,"➲ Protection Qr Already On")
elif text.lower() == 'qrprotect off':
if msg._from in Owner:
if settings["qrprotect"] == False:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Qr Already Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Qr Set To Off")
else:
settings["qrprotect"] = False
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Qr Set To Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Qr Already Off")
#-------------------------------------------------------------------------------
elif text.lower() == 'inviteprotect on':
if msg._from in Owner:
if settings["inviteprotect"] == True:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Invite Already On")
else:
nadya.sendMessage(msg.to,"➲ Protection Invite Set To On")
else:
settings["inviteprotect"] = True
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Invite Set To On")
else:
nadya.sendMessage(msg.to,"➲ Protection Invite Already On")
elif text.lower() == 'inviteprotect off':
if msg._from in Owner:
if settings["inviteprotect"] == False:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Invite Already Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Invite Set To Off")
else:
settings["inviteprotect"] = False
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Invite Set To Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Invite Already Off")
#-------------------------------------------------------------------------------
elif text.lower() == 'cancelprotect on':
if msg._from in Owner:
if settings["cancelprotect"] == True:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Already On")
else:
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Set To On")
else:
settings["cancelprotect"] = True
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Set To On")
else:
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Already On")
elif text.lower() == 'cancelprotect off':
if msg._from in Owner:
if settings["cancelprotect"] == False:
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Already Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Set To Off")
else:
settings["cancelprotect"] = False
if settings["lang"] == "JP":
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Set To Off")
else:
nadya.sendMessage(msg.to,"➲ Protection Cancel Invite Already Off")
#-------------------------------------------------------------------------------
elif text.lower() == 'setpro on':
if msg._from in Owner:
settings["protect"] = True
settings["qrprotect"] = True
settings["inviteprotect"] = True
settings["cancelprotect"] = True
nadya.sendMessage(msg.to,"➲ All Protect Set To On")
else:
nadya.sendMessage(msg.to,"Just for Owner")
elif text.lower() == 'setpro off':
if msg._from in Owner:
settings["protect"] = False
settings["qrprotect"] = False
settings["inviteprotect"] = False
settings["cancelprotect"] = False
nadya.sendMessage(msg.to,"➲ All Protect Set To Off")
else:
nadya.sendMessage(msg.to,"Just for Owner")
#-------------------------------------------------------------------------------
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Auto Add")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Auto Add")
elif text.lower() == 'autojoin on':
if msg._from in Owner:
settings["autoJoin"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Auto Join")
elif text.lower() == 'autojoin off':
if msg._from in Owner:
settings["autoJoin"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Auto Join")
elif text.lower() == 'autoleave on':
if msg._from in Owner:
settings["autoLeave"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Auto Leave")
elif text.lower() == 'autoleave off':
if msg._from in Owner:
settings["autoLeave"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Auto Leave")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Auto Read")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Auto Read")
elif text.lower() == 'checksticker on':
settings["checkSticker"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Check Details Sticker")
elif text.lower() == 'checksticker off':
settings["checkSticker"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Check Details Sticker")
elif text.lower() == 'detectmention on':
settings["datectMention"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Detect Mention")
elif text.lower() == 'detectmention off':
settings["datectMention"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Detect Mention")
elif text.lower() == 'autojoinlink on':
settings["autoJoinTicket"] = True
nadya.sendMessage(to, "Berhasil mengaktifkan Auto Join Link")
elif text.lower() == 'autojoinlink off':
settings["autoJoinTicket"] = False
nadya.sendMessage(to, "Berhasil menonaktifkan Auto Join Link")
#==============================================================================#
elif text.lower() == "respon":
nadya.sendMessage(msg.to,responsename)
ki.sendMessage(msg.to,responsename2)
ki2.sendMessage(msg.to,responsename3)
ki3.sendMessage(msg.to,responsename4)
ki4.sendMessage(msg.to,responsename5)
elif msg.text.lower() == 'absen':
if msg._from in Owner:
nadya.sendContact(to, nadyaMID)
ki.sendContact(to, kiMID)
ki2.sendContact(to, ki2MID)
ki3.sendContact(to, ki3MID)
ki4.sendContact(to, ki4MID)
elif text.lower() in ["byeall"]:
if msg._from in Owner:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
elif text.lower() in ["joinall"]:
if msg._from in Owner:
G = nadya.getGroup(msg.to)
ginfo = nadya.getGroup(msg.to)
G.preventedJoinByTicket = False
nadya.updateGroup(G)
invsend = 0
Ticket = nadya.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = nadya.getGroup(msg.to)
G.preventedJoinByTicket = True
nadya.updateGroup(G)
G.preventedJoinByTicket(G)
nadya.updateGroup(G)
elif text.lower() == 'me':
sendMessageWithMention(to, nadyaMID)
nadya.sendContact(to, nadyaMID)
elif text.lower() == 'mymid':
nadya.sendMessage(msg.to,"[MID]\n" + nadyaMID)
elif text.lower() == 'myname':
me = nadya.getContact(nadyaMID)
nadya.sendMessage(msg.to,"[DisplayName]\n" + me.displayName)
elif text.lower() == 'mybio':
me = nadya.getContact(nadyaMID)
nadya.sendMessage(msg.to,"[StatusMessage]\n" + me.statusMessage)
elif text.lower() == 'mypicture':
me = nadya.getContact(nadyaMID)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvideoprofile':
me = nadya.getContact(nadyaMID)
nadya.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'mycover':
me = nadya.getContact(nadyaMID)
cover = nadya.getProfileCoverURL(nadyaMID)
nadya.sendImageWithURL(msg.to, cover)
elif msg.text.lower().startswith("stealcontact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = nadya.getContact(ls)
mi_d = contact.mid
nadya.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("stealmid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n{}" + ls
nadya.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("stealname "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = nadya.getContact(ls)
nadya.sendMessage(msg.to, "[ Display Name ]\n" + contact.displayName)
elif msg.text.lower().startswith("stealbio "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = nadya.getContact(ls)
nadya.sendMessage(msg.to, "[ Status Message ]\n{}" + contact.statusMessage)
elif msg.text.lower().startswith("stealpicture "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.nadya.naver.jp/" + nadya.getContact(ls).pictureStatus
nadya.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("stealvideoprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.nadya.naver.jp/" + nadya.getContact(ls).pictureStatus + "/vp"
nadya.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("stealcover "):
if line != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = nadya.getProfileCoverURL(ls)
nadya.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("cloneprofile "):
if msg._from in Owner:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
nadya.cloneContactProfile(contact)
nadya.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
except:
nadya.sendMessage(msg.to, "Gagal clone member")
elif text.lower() == 'restoreprofile':
if msg._from in Owner:
try:
nadyaProfile.displayName = str(myProfile["displayName"])
nadyaProfile.statusMessage = str(myProfile["statusMessage"])
nadyaProfile.pictureStatus = str(myProfile["pictureStatus"])
nadya.updateProfileAttribute(8, nadyaProfile.pictureStatus)
nadya.updateProfile(nadyaProfile)
nadya.sendMessage(msg.to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah")
except:
nadya.sendMessage(msg.to, "Gagal restore profile")
#==============================================================================#
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
nadya.sendMessage(msg.to,"Target ditambahkan!")
break
except:
nadya.sendMessage(msg.to,"Added Target Fail !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["mimic"]["target"][target]
nadya.sendMessage(msg.to,"Target dihapuskan!")
break
except:
nadya.sendMessage(msg.to,"Deleted Target Fail !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
nadya.sendMessage(msg.to,"Tidak Ada Target")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+nadya.getContact(mi_d).displayName
nadya.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif "mimic" in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
nadya.sendMessage(msg.to,"Reply Message on")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
nadya.sendMessage(msg.to,"Reply Message off")
#==============================================================================#
elif text.lower() == 'groupcreator':
group = nadya.getGroup(to)
GS = group.creator.mid
nadya.sendContact(to, GS)
elif text.lower() == 'groupid':
gid = nadya.getGroup(to)
nadya.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'grouppicture':
group = nadya.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendImageWithURL(to, path)
elif text.lower() == 'groupname':
gid = nadya.getGroup(to)
nadya.sendMessage(to, "[Nama Group : ]\n" + gid.name)
elif text.lower() == 'groupticket':
if msg.toType == 2:
group = nadya.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = nadya.reissueGroupTicket(to)
nadya.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
else:
nadya.sendMessage(to, "Grup qr tidak terbuka silahkan buka terlebih dahulu dengan perintah {}openqr".format(str(settings["keyCommand"])))
elif text.lower() == 'groupticket on':
if msg.toType == 2:
group = nadya.getGroup(to)
if group.preventedJoinByTicket == False:
nadya.sendMessage(to, "Grup qr sudah terbuka")
else:
group.preventedJoinByTicket = False
nadya.updateGroup(group)
nadya.sendMessage(to, "Berhasil membuka grup qr")
elif text.lower() == 'groupticket off':
if msg.toType == 2:
group = nadya.getGroup(to)
if group.preventedJoinByTicket == True:
nadya.sendMessage(to, "Grup qr sudah tertutup")
else:
group.preventedJoinByTicket = True
nadya.updateGroup(group)
nadya.sendMessage(to, "Berhasil menutup grup qr")
elif text.lower() == 'groupinfo':
group = nadya.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(nadya.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ Nama Group : {}".format(str(group.name))
ret_ += "\n╠ ID Group : {}".format(group.id)
ret_ += "\n╠ Pembuat : {}".format(str(gCreator))
ret_ += "\n╠ Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n╠ Jumlah Pending : {}".format(gPending)
ret_ += "\n╠ Group Qr : {}".format(gQr)
ret_ += "\n╠ Group Ticket : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
nadya.sendMessage(to, str(ret_))
nadya.sendImageWithURL(to, path)
elif text.lower() == 'groupmemberlist':
if msg.toType == 2:
group = nadya.getGroup(to)
ret_ = "╔══[ Member List ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ Total {} ]".format(str(len(group.members)))
nadya.sendMessage(to, str(ret_))
elif text.lower() == 'grouplist':
groups = nadya.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = nadya.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
nadya.sendMessage(to, str(ret_))
#-------------------------------------------------------------------------------
elif text.lower() == 'clearban':
if msg._from in Owner:
settings["blacklist"] = {}
nadya.sendMessage(msg.to,"Blacklist Dibersihkan")
elif text.lower() == 'bancontact':
if msg._from in Owner:
settings["wblacklist"] = True
nadya.sendMessage(msg.to,"Send Contact")
elif msg.text in ["unbancontact"]:
if msg._from in Owner:
settings["dblacklist"] = True
nadya.sendMessage(msg.to,"Send Contact")
#-------------------------------------------------------------------------------
elif text.lower() == 'banlist':
if msg._from in Owner:
if settings["blacklist"] == {}:
nadya.sendMessage(msg.to,"Tidak Ada Banlist")
else:
nadya.sendMessage(msg.to,"Daftar Banlist")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in settings["blacklist"]:
msgs+="\n[%i] %s" % (num, nadya.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(settings["blacklist"])
nadya.sendMessage(msg.to, msgs)
#=======================================================================================
elif msg.text.lower().startswith("kill "):
if msg._from in Owner:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).sendText(msg.to,"Error")
#-------------------------------------------------------------------------------
elif text.lower() == 'kickallmember':
if msg._from in Owner:
if msg.toType == 2:
print ("[ 19 ] KICK ALL MEMBER")
_name = msg.text.replace("kickallmember","")
gs = nadya.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
# nadya.sendMessage(msg.to,"「 Bye All 」")
# nadya.sendMessage(msg.to,"「 Sory guys 」")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendMessage(msg.to,"Not Found")
else:
for target in targets:
if not target in Bots:
if not target in Owner:
if not target in admin:
try:
klist=[line,ki,ki2,ki3,ki4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
nadya.sendMessage(msg.to,"")
#==============================================================================#
elif text.lower() == 'mention':
group = nadya.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//100
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*100 : (a+1)*100]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Alin \n'
nadya.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
nadya.sendMessage(to, "Total {} Mention".format(str(len(nama))))
elif text.lower() == 'lurking on':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
nadya.sendMessage(msg.to,"Lurking already on")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
nadya.sendMessage(msg.to, "Set reading point:\n" + readTime)
elif text.lower() == 'lurking off':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to not in read['readPoint']:
nadya.sendMessage(msg.to,"Lurking already off")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
nadya.sendMessage(msg.to, "Delete reading point:\n" + readTime)
elif text.lower() == 'lurking reset':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
except:
pass
nadya.sendMessage(msg.to, "Reset reading point:\n" + readTime)
else:
nadya.sendMessage(msg.to, "Lurking belum diaktifkan ngapain di reset?")
elif text.lower() == 'lurking':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver].items() == []:
nadya.sendMessage(receiver,"[ Reader ]:\nNone")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = nadya.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = '[ Reader ]:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n[ Lurking time ]: \n" + readTime
try:
nadya.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
nadya.sendMessage(receiver,"Lurking has not been set.")
#==============================================================================#
elif msg.text.lower().startswith("say-af "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'af'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-sq "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'sq'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ar "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-hy "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'hy'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-bn "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'bn'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ca "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ca'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-zh "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'zh'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-zh-cn "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'zh-cn'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-zh-tw "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'zh-tw'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-zh-yue "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'zh-yue'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-hr "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'hr'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-cs "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'cs'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-da "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'da'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-nl "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'nl'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-en "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-en-au "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'en-au'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-en-uk "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'en-uk'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-en-us "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'en-us'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-eo "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'eo'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-fi "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'fi'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-fr "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'fr'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-de "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'de'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-el "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'el'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-hi "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'hi'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-hu "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'hu'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-is "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'is'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-id "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-it "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'it'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ja "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-km "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'km'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ko "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-la "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'la'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-lv "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'lv'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-mk "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'mk'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-no "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'no'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-pl "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'pl'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-pt "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'pt'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-do "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ro'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ru "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ru'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-sr "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'sr'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-si "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'si'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-sk "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'sk'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-es "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'es'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-es-es "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'es-es'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-es-us "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'es-us'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-sw "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'sw'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-sv "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'sv'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-ta "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'ta'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-th "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'th'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-tr "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'tr'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-uk "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'uk'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-vi "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'vi'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower().startswith("say-cy "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
lang = 'cy'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
#==============================================================================#
elif msg.text.lower().startswith("tr-af "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='af')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sq "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sq')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-am "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='am')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ar "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hy')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-az "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='az')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-eu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='eu')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-be "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='be')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bs "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bs')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bg')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ca "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ca')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ceb "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ceb')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ny "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ny')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-cn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-cn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-tw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-tw')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-co "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='co')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hr')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cs "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cs')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-da "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='da')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-nl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='nl')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-en "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-et "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='et')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fi')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fr')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fy')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gl')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ka "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ka')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-de "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='de')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-el "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='el')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gu')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ht "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ht')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ha "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ha')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-haw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='haw')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-iw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='iw')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hi')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hmn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hmn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hu')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-is "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='is')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ig "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ig')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-id "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ga "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ga')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-it "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='it')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ja "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-jw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='jw')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kk')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-km "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='km')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ko "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ku "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ku')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ky "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ky')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lo "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lo')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-la "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='la')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lv "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lv')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lt')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lb "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lb')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mk')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mg')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ms "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ms')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ml "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ml')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mt')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mi')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mr')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-my "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='my')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ne "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ne')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-no "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='no')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ps "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ps')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fa "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fa')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pl')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pt')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pa "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pa')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ro "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ro')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ru "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ru')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sm "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sm')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gd "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gd')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sr')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-st "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='st')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sn')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sd "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sd')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-si "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='si')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sk')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sl')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-so "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='so')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-es "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='es')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-su "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='su')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sw')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sv "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sv')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tg')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ta "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ta')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-te "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='te')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-th "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tr')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uk')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ur "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ur')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uz "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uz')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-vi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='vi')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cy')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-xh "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='xh')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yi')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yo "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yo')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zu')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fil "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fil')
A = hasil.text
nadya.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-he "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='he')
A = hasil.text
nadya.sendMessage(msg.to, A)
#==============================================================================#
elif text.lower() == 'kalender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
nadya.sendMessage(msg.to, readTime)
elif "screenshotwebsite" in msg.text.lower():
sep = text.split(" ")
query = text.replace(sep[0] + " ","")
with requests.session() as web:
r = web.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
nadya.sendImageWithURL(to, data["result"])
elif "checkdate" in msg.text.lower():
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
ret_ = "╔══[ D A T E ]"
ret_ += "\n╠ Date Of Birth : {}".format(str(data["data"]["lahir"]))
ret_ += "\n╠ Age : {}".format(str(data["data"]["usia"]))
ret_ += "\n╠ Birthday : {}".format(str(data["data"]["ultah"]))
ret_ += "\n╠ Zodiak : {}".format(str(data["data"]["zodiak"]))
ret_ += "\n╚══[ Success ]"
nadya.sendMessage(to, str(ret_))
elif "instagraminfo" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.instagram.com/{}/?__a=1".format(search))
try:
data = json.loads(r.text)
ret_ = "╔══[ Profile Instagram ]"
ret_ += "\n╠ Nama : {}".format(str(data["user"]["full_name"]))
ret_ += "\n╠ Username : {}".format(str(data["user"]["username"]))
ret_ += "\n╠ Bio : {}".format(str(data["user"]["biography"]))
ret_ += "\n╠ Pengikut : {}".format(format_number(data["user"]["followed_by"]["count"]))
ret_ += "\n╠ Diikuti : {}".format(format_number(data["user"]["follows"]["count"]))
if data["user"]["is_verified"] == True:
ret_ += "\n╠ Verifikasi : Sudah"
else:
ret_ += "\n╠ Verifikasi : Belum"
if data["user"]["is_private"] == True:
ret_ += "\n╠ Akun Pribadi : Iya"
else:
ret_ += "\n╠ Akun Pribadi : Tidak"
ret_ += "\n╠ Total Post : {}".format(format_number(data["user"]["media"]["count"]))
ret_ += "\n╚══[ https://www.instagram.com/{} ]".format(search)
path = data["user"]["profile_pic_url_hd"]
nadya.sendImageWithURL(to, str(path))
nadya.sendMessage(to, str(ret_))
except:
nadya.sendMessage(to, "Pengguna tidak ditemukan")
elif "instagrampost" in msg.text.lower():
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
nadya.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
nadya.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif "searchimage" in msg.text.lower():
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
nadya.sendImageWithURL(to, str(path))
elif "searchyoutube" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
params = {"search_query": search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.youtube.com/results", params = params)
soup = BeautifulSoup(r.content, "html5lib")
ret_ = "╔══[ Youtube Result ]"
datas = []
for data in soup.select(".yt-lockup-title > a[title]"):
if "&lists" not in data["href"]:
datas.append(data)
for data in datas:
ret_ += "\n╠══[ {} ]".format(str(data["title"]))
ret_ += "\n╠ https://www.youtube.com{}".format(str(data["href"]))
ret_ += "\n╚══[ Total {} ]".format(len(datas))
nadya.sendMessage(to, str(ret_))
elif "searchmusic " in msg.text.lower():
try:
search = text.replace("searchmusic ","")
r = requests.get("https://farzain.xyz/api/joox.php?id={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
info = data["info"]
audio = data["audio"]
hasil = "「 Hasil Musik 」\n"
hasil += "\nPenyanyi : {}".format(str(info["penyanyi"]))
hasil += "\nJudul : {}".format(str(info["judul"]))
hasil += "\nAlbum : {}".format(str(info["album"]))
hasil += "\n\nLink : \n1. Image : {}".format(str(data["gambar"]))
hasil += "\n\nLink : \n2. MP3 : {}".format(str(audio["mp3"]))
hasil += "\n\nLink : \n3. M4A : {}".format(str(audio["m4a"]))
nadya.sendImageWithURL(to, str(data["gambar"]))
nadya.sendMessage(to, str(hasil))
nadya.sendMessage(to, "Downloading...")
nadya.sendMessage(to, "「 Result MP3 」")
nadya.sendAudioWithURL(to, str(audio["mp3"]))
nadya.sendMessage(to, "「 Result M4A 」")
nadya.sendVideoWithURL(to, str(audio["m4a"]))
nadya.sendMessage(to, "Success Download...")
except Exception as error:
nadya.sendMessage(to, "「 Result Error 」\n" + str(error))
elif "searchlyric" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?" + urllib.parse.urlencode(params))
try:
data = json.loads(r.text)
for song in data:
songs = song[5]
lyric = songs.replace('ti:','Title - ')
lyric = lyric.replace('ar:','Artist - ')
lyric = lyric.replace('al:','Album - ')
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
ret_ = "╔══[ Lyric ]"
ret_ += "\n╠ Nama lagu : {}".format(str(song[0]))
ret_ += "\n╠ Durasi : {}".format(str(song[1]))
ret_ += "\n╠ Link : {}".format(str(song[4]))
ret_ += "\n╚══[ Finish ]\n{}".format(str(lyric))
nadya.sendMessage(to, str(ret_))
except:
nadya.sendMessage(to, "Lirik tidak ditemukan")
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
nadya.sendMessage(to, str(ret_))
#===============================================================================[NEW]
elif msg.text.lower().startswith("checkpraytime "):
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashr : " and data[4] != "Maghrib : " and data[5] != "Isha : ":
ret_ = "╔══[ Prayer Schedule ]"
ret_ += "\n╠ Lokasi : " + data[0]
ret_ += "\n╠ " + data[1]
ret_ += "\n╠ " + data[2]
ret_ += "\n╠ " + data[3]
ret_ += "\n╠ " + data[4]
ret_ += "\n╠ " + data[5]
ret_ += "\n╚══[ Complete ]"
else:
ret_ = "[ Prayer Schedule ] Error : Lokasi tidak ditemukan"
nadya.sendMessage(to, str(ret_))
elif msg.text.lower().startswith("checkweather "):
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if "result" not in data:
ret_ = "╔══[ Weather Status ]"
ret_ += "\n╠ Lokasi : " + data[0].replace("Temperatur di kota ","")
ret_ += "\n╠ Suhu : " + data[1].replace("Suhu : ","")
ret_ += "\n╠ Kelembaban : " + data[2].replace("Kelembaban : ","")
ret_ += "\n╠ Tekanan Udara : " + data[3].replace("Tekanan udara : ","")
ret_ += "\n╠ Kecepatan Angin : " + data[4].replace("Kecepatan angin : ","")
ret_ += "\n╚══[ Complete ]"
else:
ret_ = "[ Weather Status ] Error : Lokasi tidak ditemukan"
nadya.sendMessage(to, str(ret_))
elif msg.text.lower().startswith("checklocation "):
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "╔══[ Details Location ]"
ret_ += "\n╠ Lokasi : " + data[0]
ret_ += "\n╠ Google Maps : " + link
ret_ += "\n╚══[ Complete ]"
else:
ret_ = "[ Details Location ] Error : Lokasi tidak ditemukan"
nadya.sendMessage(to,str(ret_))
# Check if only image
# "cpp": arm the change-profile-picture flag; the next image message
# (contentType 1) will be used as the new profile photo.
elif text.lower() == 'cpp':
    settings["changePicture"] = True
    nadya.sendMessage(to, "Silahkan kirim gambarnya")
# "cgp": arm change-group-picture for this group (groups only).
elif text.lower() == 'cgp':
    if msg.toType == 2:
        if to not in settings["changeGroupPicture"]:
            settings["changeGroupPicture"].append(to)
            # NOTE(review): original indentation was lost in transit; the
            # prompt may have been outside this inner if — confirm nesting.
            nadya.sendMessage(to, "Silahkan kirim gambarnya")
# Image message: consume any pending picture-change request(s).
elif msg.contentType == 1:
    if settings["changePicture"] == True:
        path = nadya.downloadObjectMsg(msg_id)
        settings["changePicture"] = False
        nadya.updateProfilePicture(path)
        nadya.sendMessage(to, "Berhasil mengubah foto profile")
    if msg.toType == 2:
        if to in settings["changeGroupPicture"]:
            # Downloaded again even if the profile branch already ran;
            # presumably both flags are rarely set at once.
            path = nadya.downloadObjectMsg(msg_id)
            settings["changeGroupPicture"].remove(to)
            nadya.updateGroupPicture(to, path)
            nadya.sendMessage(to, "Berhasil mengubah foto group")
elif text.lower() == 'rejectall':
ginvited = nadya.ginvited
if ginvited != [] and ginvited != None:
for gid in ginvited:
nadya.rejectGroupInvitation(gid)
nadya.sendMessage(to, "Berhasil tolak sebanyak {} undangan".format(str(len(ginvited))))
else:
nadya.sendMessage(to, "Tidak ada undangan yang tertunda")
elif text.lower() == 'invgroupcall':
if msg.toType == 2:
group = nadya.getGroup(to)
members = [mem.mid for mem in group.members]
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
nadya.sendMessage(to, "Berhasil mengundang kedalam telponan group")
elif text.lower() == 'removeallchat':
nadya.removeAllMessages(op.param2)
nadya.sendMessage(to, "Berhasil hapus semua chat")
elif text.lower() == 'time':
nadya.sendMessage(to, "Goblok cek sendiri di tanggal jangan manja")
elif msg.text.lower().startswith("gbroadcast "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
groups = nadya.groups
for group in groups:
nadya.sendMessage(group, "[ Broadcast ]\n{}".format(str(txt)))
nadya.sendMessage(to, "Berhasil broadcast ke {} group".format(str(len(groups))))
elif msg.text.lower().startswith("fbroadcast "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
friends = nadya.friends
for friend in friends:
nadya.sendMessage(friend, "[ Broadcast ]\n{}".format(str(txt)))
nadya.sendMessage(to, "Berhasil broadcast ke {} teman".format(str(len(friends))))
elif msg.text.lower().startswith("allbroadcast "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
friends = nadya.friends
groups = nadya.groups
for group in groups:
nadya.sendMessage(group, "[ Broadcast ]\n{}".format(str(txt)))
nadya.sendMessage(to, "Berhasil broadcast ke {} group".format(str(len(groups))))
for friend in friends:
nadya.sendMessage(friend, "[ Broadcast ]\n{}".format(str(txt)))
nadya.sendMessage(to, "Berhasil broadcast ke {} teman".format(str(len(friends))))
#===============================================================================[nadyaMID - kiMID]
if op.type == 19:
print ("[ 19 ] KICKOUT NADYA MESSAGE")
try:
if op.param3 in nadyaMID:
    # nadya was kicked from the group (op.param1) by op.param2.
    # If the kicker is one of our own helper bots, quietly rejoin via a
    # fresh ticket; otherwise kick the attacker back, rejoin everyone,
    # and blacklist the attacker.
    # BUG FIXES vs. original:
    #  * the "elif op.param3 in nadyaMID:" branches repeated the initial
    #    condition and were unreachable dead code — their intent (use
    #    ki2/ki3/ki4 as the acting bot when they were the kicker) is
    #    restored here by testing op.param2 instead;
    #  * "G.preventedJoinByTicket(G)" called a boolean attribute, raising
    #    TypeError and skipping the blacklist assignment.
    if op.param2 in kiMID:
        agent, punish = ki, False
    elif op.param2 in ki2MID:
        agent, punish = ki2, False
    elif op.param2 in ki3MID:
        agent, punish = ki3, False
    elif op.param2 in ki4MID:
        agent, punish = ki4, False
    else:
        agent, punish = ki, True
    G = agent.getGroup(op.param1)
    if punish:
        agent.kickoutFromGroup(op.param1, [op.param2])
    # Temporarily open the group's join-by-ticket gate, rejoin all bots,
    # then close it again.
    G.preventedJoinByTicket = False
    agent.updateGroup(G)
    Ticket = agent.reissueGroupTicket(op.param1)
    for bot in (nadya, ki, ki2, ki3, ki4):
        bot.acceptGroupInvitationByTicket(op.param1, Ticket)
    G = agent.getGroup(op.param1)
    G.preventedJoinByTicket = True
    agent.updateGroup(G)
    if punish:
        settings["blacklist"][op.param2] = True
#===============================================================================[kiMID nadyaMID]
if op.param3 in kiMID:
    # ki was kicked from the group (op.param1) by op.param2. Same recovery
    # strategy as the nadya handler: a sibling-bot kicker means a quiet
    # rejoin; a stranger gets kicked back and blacklisted.
    # BUG FIXES vs. original: the repeated "elif op.param3 in kiMID:"
    # branches were unreachable (same condition), and
    # "G.preventedJoinByTicket(G)" raised TypeError before the blacklist
    # assignment could run.
    if op.param2 in nadyaMID:
        agent, punish = nadya, False
    elif op.param2 in ki2MID:
        agent, punish = ki2, False
    elif op.param2 in ki3MID:
        agent, punish = ki3, False
    elif op.param2 in ki4MID:
        agent, punish = ki4, False
    else:
        agent, punish = nadya, True
    G = agent.getGroup(op.param1)
    if punish:
        agent.kickoutFromGroup(op.param1, [op.param2])
    # Open the join-by-ticket gate, rejoin all bots, close it again.
    G.preventedJoinByTicket = False
    agent.updateGroup(G)
    Ticket = agent.reissueGroupTicket(op.param1)
    for bot in (nadya, ki, ki2, ki3, ki4):
        bot.acceptGroupInvitationByTicket(op.param1, Ticket)
    G = agent.getGroup(op.param1)
    G.preventedJoinByTicket = True
    agent.updateGroup(G)
    if punish:
        settings["blacklist"][op.param2] = True
#===============================================================================[ki2MID nadyaMID]
if op.param3 in ki2MID:
    # ki2 was kicked from the group (op.param1) by op.param2. Same recovery
    # strategy as the other handlers. The original's repeated
    # "elif op.param3 in ki2MID:" branches were unreachable dead code and
    # "G.preventedJoinByTicket(G)" raised TypeError; both are fixed here.
    # (The ki4MID case, which lived in one of the dead branches, is folded
    # into this chain as well.)
    if op.param2 in nadyaMID:
        agent, punish = nadya, False
    elif op.param2 in kiMID:
        agent, punish = ki, False
    elif op.param2 in ki3MID:
        agent, punish = ki3, False
    elif op.param2 in ki4MID:
        agent, punish = ki4, False
    else:
        agent, punish = nadya, True
    G = agent.getGroup(op.param1)
    if punish:
        agent.kickoutFromGroup(op.param1, [op.param2])
    # Open the join-by-ticket gate, rejoin all bots, close it again.
    G.preventedJoinByTicket = False
    agent.updateGroup(G)
    Ticket = agent.reissueGroupTicket(op.param1)
    for bot in (nadya, ki, ki2, ki3, ki4):
        bot.acceptGroupInvitationByTicket(op.param1, Ticket)
    G = agent.getGroup(op.param1)
    G.preventedJoinByTicket = True
    agent.updateGroup(G)
    if punish:
        settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki2MID ki4MID]
elif op.param3 in ki2MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
# ginfo = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
G.preventedJoinByTicket(G)
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
# ginfo = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
G.preventedJoinByTicket(G)
ki4.updateGroup(G)
settings["blacklist"][op.param2] = True
#===============================================================================[ki3MID nadyaMID]
if op.param3 in ki3MID:
if op.param2 in nadyaMID:
G = nadya.getGroup(op.param1)
# ginfo = nadya.getGroup(op.param1)
G.preventedJoinByTicket = False
nadya.updateGroup(G)
invsend = 0
Ticket = nadya.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = nadya.getGroup(op.param1)
G.preventedJoinByTicket = True
nadya.updateGroup(G)
G.preventedJoinByTicket(G)
nadya.updateGroup(G)
else:
G = nadya.getGroup(op.param1)
# ginfo = nadya.getGroup(op.param1)
nadya.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
nadya.updateGroup(G)
invsend = 0
Ticket = nadya.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = nadya.getGroup(op.param1)
G.preventedJoinByTicket = True
nadya.updateGroup(G)
G.preventedJoinByTicket(G)
nadya.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki3MID kiMID]
elif op.param3 in ki3MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
# ginfo = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
G.preventedJoinByTicket(G)
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
# ginfo = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
G.preventedJoinByTicket(G)
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki3MID ki2MID]
elif op.param3 in ki3MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
# ginfo = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
G.preventedJoinByTicket(G)
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
# ginfo = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
G.preventedJoinByTicket(G)
ki2.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki3MID ki4MID]
elif op.param3 in ki3MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
# ginfo = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
G.preventedJoinByTicket(G)
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
# ginfo = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
G.preventedJoinByTicket(G)
ki4.updateGroup(G)
settings["blacklist"][op.param2] = True
#===============================================================================[ki4MID nadyaMID]
if op.param3 in ki4MID:
if op.param2 in nadyaMID:
G = nadya.getGroup(op.param1)
# ginfo = nadya.getGroup(op.param1)
G.preventedJoinByTicket = False
nadya.updateGroup(G)
invsend = 0
Ticket = nadya.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = nadya.getGroup(op.param1)
G.preventedJoinByTicket = True
nadya.updateGroup(G)
G.preventedJoinByTicket(G)
nadya.updateGroup(G)
else:
G = nadya.getGroup(op.param1)
# ginfo = nadya.getGroup(op.param1)
nadya.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
nadya.updateGroup(G)
invsend = 0
Ticket = nadya.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = nadya.getGroup(op.param1)
G.preventedJoinByTicket = True
nadya.updateGroup(G)
G.preventedJoinByTicket(G)
nadya.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki4MID kiMID]
elif op.param3 in ki4MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
# ginfo = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
G.preventedJoinByTicket(G)
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
# ginfo = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
G.preventedJoinByTicket(G)
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki4MID ki2MID]
elif op.param3 in ki4MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
# ginfo = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
G.preventedJoinByTicket(G)
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
# ginfo = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
G.preventedJoinByTicket(G)
ki2.updateGroup(G)
settings["blacklist"][op.param2] = True
#-------------------------------------------------------------------------------[ki4MID ki3MID]
elif op.param3 in ki4MID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
# ginfo = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
G.preventedJoinByTicket(G)
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
# ginfo = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
nadya.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
G.preventedJoinByTicket(G)
ki3.updateGroup(G)
settings["blacklist"][op.param2] = True
elif op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["protect"] == True:
settings["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
random.choice(KAC).sendText(op.param1,"Don't Play bro...!")
else:
pass
except:
pass
#==============================================================================#
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["inviteprotect"] == True:
settings["blacklist"][op.param2] = True
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["cancelprotect"] == True:
settings["blacklist"][op.param2] = True
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
#-------------------------------------------------------------------------------
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in admin and Bots and Owner:
pass
elif settings["qrprotect"] == True:
settings["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
nadya.sendMessage(op.param1,"Qr under protect")
else:
nadya.sendMessage(op.param1,"")
#==============================================================================#
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
if op.param1 in read["readPoint"]:
_name = nadya.getContact(op.param2).displayName
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
timeHours = datetime.strftime(timeNow," (%H:%M)")
read["readMember"][op.param1][op.param2] = str(_name) + str(timeHours)
backupData()
except Exception as error:
logError(error)
#==============================================================================#
if op.type == 26:
msg = op.message
if text.lower() == '/ti/g/':
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = nadya.findGroupByTicket(ticket_id)
nadya.acceptGroupInvitationByTicket(group.id,ticket_id)
nadya.sendMessage(to, "Berhasil masuk ke group %s" % str(group.name))
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != nadya.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
nadya.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
nadya.sendMessage(msg.to,text)
if msg.contentType == 0 and sender not in nadyaMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if nadyaMID in mention["M"]:
if settings["detectMention"] == True:
contact = nadya.getContact(sender)
nadya.sendMessage(to, "sundala nu")
sendMessageWithMention(to, contact.mid)
break
except Exception as error:
logError(error)
#==============================================================================#
# Auto join if BOT invited to group
def NOTIFIED_INVITE_INTO_GROUP(op):
    """Auto-join: accept a pending group invitation (op.param1 = group id)
    on every bot account, in the usual nadya/ki/ki2/ki3/ki4 order."""
    try:
        for bot in (nadya, ki, ki2, ki3, ki4):
            bot.acceptGroupInvitation(op.param1)
    except Exception as e:
        nadya.log("[NOTIFIED_INVITE_INTO_GROUP] ERROR : " + str(e))
# Auto kick if BOT out to group
def NOTIFIED_KICKOUT_FROM_GROUP(op):
    """Retaliation handler for kick events: if the actor (op.param2) is not
    one of our bots, kick them back out of the group (op.param1).

    Fix: kickoutFromGroup expects a *list* of member ids -- every other call
    site in this file passes [op.param2]; the bare value here was a bug.
    """
    try:
        if op.param2 not in Bots:
            random.choice(KAC).kickoutFromGroup(op.param1, [op.param2])
        else:
            pass
    except Exception as e:
        nadya.log("[NOTIFIED_KICKOUT_FROM_GROUP] ERROR : " + str(e))
# Main event loop: long-poll the server for operations and dispatch each
# one to lineBot(); errors are logged and the loop keeps running.
while True:
    try:
        # Fetch up to 50 pending operations in one poll.
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                lineBot(op)
                # Advance the revision pointer so ops are not re-delivered.
                oepoll.setRevision(op.revision)
    except Exception as e:
        logError(e)
| [
"noreply@github.com"
] | noreply@github.com |
122dc43189aa337649067dc25830b6b64f0ac0ae | a9450ebdc79425cae023da5866b3d1d9b98d6fb2 | /vrchat-api-python/examples/download_friend_images.py | 7fad3fbbf79c58e4658267b70c7b3d6d57f906bf | [
"MIT"
] | permissive | kugiha/twitter-vrc-status | a3c8e75dbf8dbf064c68fd19a2dbf635b7ad40f3 | 0016baa4a25f89497b0280a5f534f8428023c230 | refs/heads/master | 2020-04-21T20:28:00.795918 | 2019-03-14T14:22:13 | 2019-03-14T14:22:13 | 169,845,658 | 23 | 2 | MIT | 2019-03-14T13:42:51 | 2019-02-09T08:12:48 | Python | UTF-8 | Python | false | false | 776 | py | #!/usr/bin/env python3
import os
from getpass import getpass
import requests
from vrchat_api import VRChatAPI
from vrchat_api.enum import ModerationType
"""
Download friends' avatar thumbnails to `friend_thumbnails`.
This may takes for a while if you have a lot of friends.
"""
DIR = "friend_thumbnails"

# Create the output directory on first run.
if not os.path.exists(DIR):
    os.mkdir(DIR)

# Prompt for credentials interactively; the password prompt is hidden.
a = VRChatAPI(
    getpass("VRChat Username"),
    getpass()
)
a.authenticate()

# Download each friend's current avatar thumbnail to DIR/<username>.png.
friends = a.getFriends()
for friend in friends:
    print("Downloading {:20} {}".format(friend.username+"'s thumbnail:", friend.currentAvatarImageUrl))
    ret = requests.get(friend.currentAvatarImageUrl)
    assert ret.status_code == 200
    # Fix: the original named this handle `f`, shadowing the loop variable.
    with open(os.path.join(DIR, "{}.png".format(friend.username)), "wb") as img_file:
        # Stream the body in chunks instead of relying on iterating the
        # Response object (which yields tiny 128-byte chunks).
        for chunk in ret.iter_content(chunk_size=8192):
            img_file.write(chunk)
| [
"46911299+kugiha@users.noreply.github.com"
] | 46911299+kugiha@users.noreply.github.com |
9eda3c73e40bf37ee7c3b604cc517878bcb49715 | 4e4f921ccc8c139bb8918d5c3dfd7fa3e3c9b686 | /tcpserver.py | 43f7962fee662476bd65455bdf901e39565f9664 | [] | no_license | jay1499/ComputerNetworks_1BM17CS033 | b503e64d5496506cde0aaa14426fc9a77699f65e | cb875fc4418fd6a9cca9b6d074fade009ddd7823 | refs/heads/master | 2020-07-05T13:04:41.155086 | 2019-11-20T06:38:42 | 2019-11-20T06:38:42 | 202,654,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from socket import *
# NOTE(review): despite the file name "tcpserver", SOCK_DGRAM makes this a
# UDP server.
serverName = "10.90.2.154"  # interface address to bind to
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind((serverName, serverPort))
print("The server is ready to receive ")
# Serve forever: each incoming datagram is treated as a file name; the
# file's contents are read and sent back to the requester in one datagram.
while True:
    fileName, addr = serverSocket.recvfrom(1024)
    fileName = fileName.decode()
    with open(fileName, "r") as f:
        message = f.read()
    # NOTE(review): a file larger than the UDP payload limit (~64 KB) will
    # make this sendto fail -- confirm expected file sizes.
    serverSocket.sendto(message.encode(), addr)
© 2019 GitHub, Inc.
| [
"noreply@github.com"
] | noreply@github.com |
bc23b9d69210017a402610181ac43e53d89e6aa2 | a5a489f8a268e3d13286fa7ca000d3f26d10d263 | /Basic_Stat/hypothesis_test.py | 812bfd5a97f557194fda9a43b59d377e9ed321c2 | [] | no_license | jusui/Data_Science | cd36e1b9d675be7b5deb98a6034ce57339f09b41 | 7d2ffea15532e35ea64597b3d6f53752a1d4322e | refs/heads/master | 2021-09-24T06:38:55.588645 | 2018-10-04T14:50:08 | 2018-10-04T14:50:08 | 110,791,573 | 0 | 0 | null | 2018-08-04T15:44:22 | 2017-11-15T06:13:28 | Jupyter Notebook | UTF-8 | Python | false | false | 33 | py | # coding:utf-8
import numpy as np | [
"junya.usui.0714@gmail.com"
] | junya.usui.0714@gmail.com |
b5e6c84cbadba8486d77887c8f70e02ede019cb7 | 0d833f0fed48955bf688b9490d29c9c83d84a1be | /web_flask/3-python_route.py | cdaf6e4cd7877b0b8a5a8c2e3d8eb392bff9a03f | [] | no_license | krytech/AirBnB_clone_v2 | 116403bb0df49647a3d04eaaee19838234b3123e | d7b3cf65bd2fc4deaa22efb18822f55d8b056327 | refs/heads/master | 2023-07-28T01:10:39.296493 | 2021-09-12T20:47:10 | 2021-09-12T20:47:10 | 400,201,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/python3
"""
Starts Flask web app
"""
from flask import Flask
app = Flask(__name__)
# Treat routes with and without a trailing slash as the same URL.
app.url_map.strict_slashes = False
@app.route('/')
def hello_hbnb():
    """Root route: return the greeting banner."""
    greeting = "Hello HBNB!"
    return greeting
@app.route('/hbnb')
def hbnb():
    """/hbnb route: return the plain HBNB banner."""
    banner = "HBNB"
    return banner
@app.route('/c/<text>')
def c_text(text):
    """/c/<text> route: echo <text> with underscores shown as spaces."""
    cleaned = text.replace('_', ' ')
    return "C " + cleaned
@app.route('/python')
@app.route('/python/<text>')
def python_text(text="is cool"):
    """/python/<text> route: echo <text> (default "is cool"), with
    underscores shown as spaces."""
    cleaned = text.replace('_', ' ')
    return "Python " + cleaned
if __name__ == "__main__":
    # Listen on all interfaces, default Flask port 5000.
    app.run(host="0.0.0.0", port=5000)
| [
"cstearn@gmail.com"
] | cstearn@gmail.com |
10981bf2173d64926fb95fca90610ff3c24eb6bd | a2780ef4728af2b6191c4b7f41e5a677d5f2c511 | /py_sql/test.py | cb24f8bb7b5df2736cfdb006fb7156852392b740 | [] | no_license | hcsk/python_codes | 2f99f388dc38b9dd8dd6035b96ec6c34cef54cff | 400a0d32290fe30735edfa2b68bf856590db779c | refs/heads/master | 2021-08-15T21:33:05.021540 | 2017-11-18T09:33:08 | 2017-11-18T09:33:08 | 111,190,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py |
import MySQLdb
# conn=MySQLdb.connect(host='localhost',
# port=3306,
# user='root',
# passwd=''
# )
conn=MySQLdb.connect(
user='root',
#passwd=''
)
cur=conn.cursor()
cur.execute("show databases")
rows=cur.fetchall()
for row in rows:
print row
cur.close()
conn.commit()
conn.close() | [
"hpusk365@gmail.com"
] | hpusk365@gmail.com |
56a12d7b69aadf1aa29d400d196f364a7966d896 | 328261e74a372cdd58cb1cc7e785cb2424ab9ff7 | /sibyl/protocol/sibyl_client_udp_text_protocol.py | 6053acc9e1ef080ef3b98b25a3f120a21356dc5b | [] | no_license | Yousthon/Networks-Capstone-Project-C2W | aa667b4b9c5a8991dbdc47653ac091f9f1f99a85 | d5bf13f0f9802d1b11f004bbd620e5c456bb3109 | refs/heads/master | 2020-05-17T14:27:20.487387 | 2019-04-27T11:37:00 | 2019-04-27T11:37:00 | 183,764,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,455 | py | # -*- coding: utf-8 -*-
from twisted.internet.protocol import DatagramProtocol
import time
class SibylClientUdpTextProtocol(DatagramProtocol):
    """The Sibyl client protocol, UDP text variant.

    All interaction with the user interface goes through the
    SibylClientProxy (``self.clientProxy``); the protocol itself only
    formats outgoing requests and forwards incoming responses.

    Attributes:
        clientProxy: the SibylClientProxy instance (the only way to
            interact with the user interface).
        serverAddress: the address of the Sibyl server.
        serverPort: the port number of the Sibyl server.
    """

    def __init__(self, sibylClientProxy, port, host):
        """Store the proxy and the server endpoint.

        Args:
            sibylClientProxy: the instance of the client proxy;
            port: the port number of the server;
            host: the address of the server.
        """
        self.serverAddress = host
        self.serverPort = port
        self.clientProxy = sibylClientProxy

    def sendRequest(self, line):
        """Send the user's question to the server.

        The SibylClientProxy calls this method when the user clicks the
        "Send Question" button.

        Args:
            line (string): the text of the question.
        """
        self.transport.connect(self.serverAddress, self.serverPort)
        # Request format: "<unix timestamp>: <question>CRLF".
        # NOTE(review): the *literal text* "CRLF" is appended here, not an
        # actual "\r\n" sequence -- confirm against the protocol spec.
        request = str(int(time.time())) + ": " + line + "CRLF"
        print(request)
        self.transport.write(request.encode('utf-8'))

    def datagramReceived(self, datagram, host_port):
        """Forward a server response to the user interface.

        Twisted calls this method whenever a datagram is received.

        Args:
            datagram (bytes): the payload of the UDP packet;
            host_port (tuple): the source host and port number.
        """
        self.clientProxy.responseReceived(datagram.decode("utf-8"))
        print(datagram.decode("utf-8"))
| [
"beyegbin-baudouin-venceslas.kouassi@imt-atlantique.net"
] | beyegbin-baudouin-venceslas.kouassi@imt-atlantique.net |
470cfb5e9ae74a30f9f96b586becfb3043effda3 | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /guvi_4_3_8.py | 5474fe795104649ed224413b4b7e015287da17e6 | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print("guvi_4_3_8.py") | [
"ankitagrawal11b@gmail.com"
] | ankitagrawal11b@gmail.com |
21837a508985ac6fd5e3b388db93f0970ced86cf | 8ba7809c0bf33aad972d5ead73f720099753f158 | /backend/debits/migrations/0003_auto_20180213_1228.py | c37dee7d394599767ca950dcc39fb6860d61d278 | [
"BSD-3-Clause"
] | permissive | picsadotcom/maguire | acecdf91082a1ede6144c6eaebc03511e3141b89 | bb15b0dd84bbc1f88e2755f342d4fb586c4634b7 | refs/heads/master | 2022-09-13T10:01:39.077557 | 2022-08-22T18:29:19 | 2022-08-22T18:29:19 | 105,751,650 | 2 | 1 | BSD-3-Clause | 2022-08-23T18:02:45 | 2017-10-04T09:31:28 | Python | UTF-8 | Python | false | false | 820 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-13 12:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add error-tracking fields (last_error, load_attempts) to Debit."""
    dependencies = [
        ('debits', '0002_auto_20180213_1222'),
    ]
    operations = [
        migrations.AddField(
            model_name='debit',
            name='last_error',
            field=models.TextField(blank=True, help_text='The error message received on the last attempt to load the debit', null=True, verbose_name='Last Error'),
        ),
        migrations.AddField(
            model_name='debit',
            name='load_attempts',
            # NOTE(review): "attmepted" typo below is in the generated
            # help_text; fixing it would require a new migration.
            field=models.IntegerField(default=0, help_text='Number of times maguire has attmepted to load the debit', verbose_name='Load Attempts'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
776dd5240d98e0308d6508abc8ced2f22df18204 | a67a4df006890da2d967d9d46b4b1428431648ad | /myblog/blog/migrations/0002_comment.py | 31a6963615589369c5ea21feff97df8d61617d55 | [] | no_license | Gmiller290488/myBlog | cf0e2ee2ee31a08e357f9084d09522feda199ebe | 0905e3d1c7a2298f00507da1807142492f93d1c1 | refs/heads/master | 2021-01-19T00:27:54.005198 | 2018-01-11T16:24:14 | 2018-01-11T16:24:14 | 87,071,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 09:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Comment model (author, created_date, approval flag,
    and a ForeignKey to blog.Post via related_name='comments')."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # Comments start unapproved until moderated.
                ('approved_comment', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
| [
"gmiller290488@gmail.com"
] | gmiller290488@gmail.com |
aaf37ca02683982116ce8dcfb9b3bd8d676896d3 | 3e3096b4b2503697fb0ecd2942d1d9cfbfeaf787 | /cutter/bayes_cutter.py | 41d492516b4439ffadeacc58233812741495dd2f | [] | no_license | LuShengDong/resumeparsing | 6dc8216d14db02840c210f026f2fd470447b59cb | f5aeef381c55249b245f4e989cef136a556be99c | refs/heads/master | 2020-03-09T21:14:38.860145 | 2018-04-10T23:07:28 | 2018-04-10T23:07:28 | 129,004,247 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | # -*- coding: utf-8 -*-
from cutter.bayes import get_classifier, features_gen, cls_path
import codecs
import argparse
import pickle
def cut(lines, train=False) -> dict:
    """Split resume lines into sections keyed by classified title lines.

    Lines the Bayes classifier labels 'title' start a new section; all
    other lines are appended to the current section. Text before the
    first title goes under 'Personal_Info'.

    Args:
        lines: iterable of resume lines.
        train: if True, (re)train the classifier; otherwise load the
            pickled one from cls_path.
    """
    if train:
        classifier, _ = get_classifier()
    else:
        with open(cls_path, 'rb') as handle:
            classifier = pickle.load(handle)
    sections = {}
    bucket = []
    sections['Personal_Info'] = bucket
    for raw in lines:
        stripped = raw.strip()
        if classifier.classify(features_gen(stripped)) == 'title':
            bucket = []
            sections[stripped] = bucket
        else:
            bucket.append(stripped)
    return sections
if __name__ == '__main__':
    # CLI: read the resume file given with -r, cut it into sections,
    # and dump the result as JSON to data.txt.
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', type=str, help='resume')
    args = parser.parse_args()
    with codecs.open(args.r, 'r', 'utf-8') as f:
        resume = f.readlines()
    structure = cut(resume)
    import json
    with codecs.open('data.txt', 'w', 'utf-8') as outfile:
        json.dump(structure, outfile)
| [
"HandsomeDong@users.noreply.github.com"
] | HandsomeDong@users.noreply.github.com |
4f1556b846bb9618ed039b87625359d300eb96e6 | d85499e71af351da4e3e8ecb4fb498f954bf5b5c | /preprocessing/Twitter_Cassandra_analytics_pipeline/CassandraDriver.py | 587385d51a1529e36d2b50c1b903e6b9f37d44cd | [] | no_license | KahiniWadhawan/Big-Neuron | 56c3d86779e227fb0d9abab7f484ff25b25ec04b | 528d8ec68058b06b710c94256bfafbd2b9ddd972 | refs/heads/master | 2021-01-22T01:43:49.969790 | 2016-04-27T20:30:28 | 2016-04-27T20:30:28 | 57,263,743 | 1 | 0 | null | 2016-04-28T02:21:36 | 2016-04-28T02:21:36 | null | UTF-8 | Python | false | false | 849 | py | '''
============================================================================
Name : CassandraDriver.py
Author : Piyush
Version : 1
Copyright : DS
Description :
API's required to connect to the Cassandra database. Create object of this class and call the __init__()
method on this superclass to create an instance of the driver. ( You can test it by calling the TestSupport
method.)
============================================================================
'''
import TOKENS
import time
from Analytics import IBMToneAnalyzer
class CassandraAPI(IBMToneAnalyzer):
    """Cassandra driver: opens a session to the cluster named in
    TOKENS.cassandra_cluster on top of the IBMToneAnalyzer base class."""
    def __init__(self):
        # Initialise the analytics base class first.
        IBMToneAnalyzer.__init__(self)
        # Imported lazily so the cassandra driver is only required when
        # this class is actually instantiated.
        from cassandra.cluster import Cluster
        cluster = Cluster()
        # NOTE(review): connect()'s argument is normally a keyspace name --
        # confirm TOKENS.cassandra_cluster holds a keyspace, not a host.
        self.session = cluster.connect(TOKENS.cassandra_cluster)
    def TestSupport(self):
        # Smoke-test helper.
        # NOTE(review): exit() terminates the whole process, so any code
        # after calling TestSupport() never runs.
        self.aaa=10
        print "Hello there"
        exit()
| [
"pipa0979@colorado.edu"
] | pipa0979@colorado.edu |
86d76f1d5a9dda9351779d645d022d6b41df36e7 | fdfeea83edbf239230a5aee558641efdf3531724 | /basic/python_Data_Structure/DataStructure/pop.py | 7227d2f78a5d6c9a3eac780e81c22eee8e4c07dc | [] | no_license | jszheng/PyChild | 192cdb57a9d2c87bdcb5b20c110c82521a2a02b8 | 02ab3959f067a246674ad29dbf7a0929babae57e | refs/heads/master | 2021-05-16T03:22:00.010716 | 2020-07-03T14:08:16 | 2020-07-03T14:08:16 | 39,426,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import timeit
# Micro-benchmark contrasting list.pop(0) (O(n): shifts every remaining
# element left) with list.pop() (amortized O(1): removes the tail element).
# The setup string 'from __main__ import x' couples both timers to the
# module-level name x, so x must be (re)assigned before each .timeit() call.
popzero = timeit.Timer('x.pop(0)',
                       'from __main__ import x')
popend = timeit.Timer('x.pop()',
                      'from __main__ import x')
# x = list(range(2000))
#
# print(popzero.timeit(number=1000))
# print(popend.timeit(number=1000))
print(" pop(0) pop()")
# Grow x from one to ten million elements to show pop(0) scaling linearly
# while pop() stays flat.
for i in range(1000000,10000001,1000000):
    x = list(range(i))
    pt = popend.timeit(number=1000)
    # NOTE(review): the rebuild of x below is commented out, so pop(0) runs
    # on a list already shortened by the 1000 pop() calls above -- presumably
    # intentional (negligible vs. millions of elements), but confirm.
    # x = list(range(i))
    pz = popzero.timeit(number=1000)
    print("%15.5f, %15.5f" %(pz,pt))
| [
"zyz@hotmail.com"
] | zyz@hotmail.com |
50b919cd268e6cd0bb6b7b6435a97dd8db122345 | e9a61b359195705a7ce7492c9a8ab3a56d276347 | /12_oop/ctrl-c异常.py | a6cc92f65388e7e5599605e17790f6b7fdf6991d | [] | no_license | wovert/PythonTutorials | 79ac68b173af3f2a0eceb04b302b0d05f2f6bdc3 | f625b8a82975bb40f8e3d5358e7f51dd214893a4 | refs/heads/master | 2022-07-26T12:05:41.630423 | 2022-07-08T01:03:40 | 2022-07-08T01:03:40 | 127,613,862 | 40 | 16 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | import time
# Endless once-per-second print loop; the script exists to demonstrate
# interrupting a running program with Ctrl-C (KeyboardInterrupt).
while True:
    print("haha")
    time.sleep(1)
"wovert@126.com"
] | wovert@126.com |
441440eb137b7cf9fe50cd06eb3c1df30ee31e59 | 36d9e2488cd85cdd7a707b793cd80b2bc5ee07e1 | /DP/[백준]11057 오르막 수.py | a839598e0477358ce78e5f97ac776a9baed3cdee | [] | no_license | rheehot/Algorithm-36 | a2a2335a0bad524e9ed0c87254cc1348526f5ab3 | 560c0fcb4103afba1995a61755f9e3070c433e4e | refs/heads/master | 2023-01-21T10:33:51.791421 | 2020-11-30T06:53:45 | 2020-11-30T06:53:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | # https://www.acmicpc.net/problem/11057
'''python
간단한 문제이다.
dp[i][j]를 i길이의 j숫자가 마지막으로 오는 경우의 수라고 생각하고 풀면된다.
i-1길이의 경우에서 j보다 같거나 작은 숫자가f 오는 경우를 더해주면된다.(k <= j)
dp[i][j] += dp[i-1][k]의 점화식이 성립하게 된다.
'''
import sys
def read():
    """Return the next line from stdin, stripped of surrounding whitespace."""
    line = sys.stdin.readline()
    return line.strip()
n = int(read())
# dp[j] = number of non-decreasing ("ascending") numbers of the current
# length whose last digit is <= j, kept modulo 10007 throughout so the
# integers stay small (the original modded only the final huge value).
dp = [j + 1 for j in range(10)]  # length 1: digits 0..j give j + 1 numbers
for _ in range(1, n):
    # Each next-length table row is the running prefix sum of the previous
    # row: a number ending in digit j extends any shorter one ending <= j.
    # This is O(10) per length instead of the original O(100) double loop.
    prefix = 0
    nxt = []
    for j in range(10):
        prefix = (prefix + dp[j]) % 10007
        nxt.append(prefix)
    dp = nxt
# dp[9] already counts ALL endings (cumulative), so it is the answer.
print(dp[9] % 10007)
| [
"wfo3977@gmail.com"
] | wfo3977@gmail.com |
1b80f4522f4cc303b7c7229a8447611635a1859f | 7c1de4cefad753b593872e9ae8f392f22d0965bd | /docs/source/conf.py | 05d21e59b84b7deb6c5f663fafdcd078f510f63d | [
"MIT"
] | permissive | sergegoussev/pysqlc | 0c0d71dc7ce33c8f146b16136f3976043e5a23f5 | 6a4f29d97779e1e513dab37c60c51d53eefac0eb | refs/heads/0.2-dev | 2021-04-09T16:29:56.204797 | 2019-10-17T00:31:22 | 2019-10-17T00:31:22 | 125,658,977 | 1 | 0 | MIT | 2019-10-17T00:34:42 | 2018-03-17T18:44:48 | Python | UTF-8 | Python | false | false | 5,471 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
# -- Project information -----------------------------------------------------
project = 'pysqlc'
copyright = '2019, @sergegoussev'
author = '@sergegoussev'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.5'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): 'recommonmark' (Markdown support) is enabled, but
# source_suffix below only registers '.rst', so Markdown sources would not
# be picked up -- confirm this is intended.
extensions = ['recommonmark']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# Extra variables exposed to the theme's HTML templates (edit-on-GitHub
# links, build date, etc.).
# NOTE(review): github_repo / source_url_prefix say 'pysql' while the
# project is 'pysqlc' -- presumably a typo in the repository name; confirm
# the "edit on GitHub" links resolve.
html_context = {
    'author': "Serge Goussev",
    'date': datetime.date.today().strftime('%d/%m/%y'),
    'source_url_prefix': "https://github.com/sergegoussev/pysql/",
    "display_github": True,
    "github_host": "github.com",
    "github_user": "sergegoussev",
    "github_repo": 'pysql',
    "github_version": "master/docs/source/",
    }
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysqlcdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pysqlc.tex', 'pysqlc Documentation',
     '@sergegoussev', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pysqlc', 'pysqlc Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pysqlc', 'pysqlc Documentation',
     author, 'pysqlc', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"goussev.serge@gmail.com"
] | goussev.serge@gmail.com |
5d565e7d89b2cf7e44965b839844bcc6a47e0e56 | ecbbc5cf8b49de00dd956386ea7cf31951aecbf8 | /src/KalmanFilter.py | d0005ea5d794108215ebbe567191ff497c0fe45c | [] | no_license | connorlee77/ardrone_stateestimation | 9e49339c6d916a146a709acc4adf947453c9d626 | 253722cf1940fd368bc10dcd90be0c0113bb4339 | refs/heads/master | 2021-01-10T13:13:57.845898 | 2016-03-18T08:53:18 | 2016-03-18T08:53:18 | 53,226,979 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | import numpy as np
import matplotlib.pyplot as plt
import rospy
class KalmanFilter:
    """Linear Kalman filter built on numpy matrix operations.

    Attributes follow textbook notation: A (state transition), B (control),
    H (observation), P (estimate covariance), Q (process noise, constant),
    R (measurement noise, constant), x_k (state estimate) and kalmanGain.
    x_k starts as the scalar 0; numpy broadcasting shapes it on the first
    predictState() call.
    """

    def __init__(self, A, P, R, Q, H, B, dimension):
        self.A = A
        self.P = P
        self.x_k = 0
        self.kalmanGain = 0
        self.R = R  # measurement noise covariance (constant)
        self.Q = Q  # process noise covariance (constant)
        self.H = H
        self.B = B
        self.dimensions = dimension

    def predictState(self, u_k):
        """Time update: propagate state estimate and covariance forward."""
        # x_k <- A x_k + B u_k
        self.x_k = np.add(np.dot(self.A, self.x_k), np.dot(self.B, u_k))
        # P <- A P A^T + Q
        propagated = np.dot(np.dot(self.A, self.P), np.transpose(self.A))
        self.P = np.add(propagated, self.Q)

    def getKalmanGain(self):
        """Compute and store K = P H^T (H P H^T + R)^-1."""
        numerator = np.dot(self.P, np.transpose(self.H))
        innovation_cov = np.add(
            np.dot(np.dot(self.H, self.P), np.transpose(self.H)),
            self.R)
        self.kalmanGain = np.dot(numerator, np.linalg.inv(innovation_cov))

    def update(self, z_k):
        """Measurement update: fold the observation z_k into the estimate."""
        # residual (innovation) = z_k - H x_k
        residual = np.subtract(z_k, np.dot(self.H, self.x_k))
        self.x_k = np.add(self.x_k, np.dot(self.kalmanGain, residual))
        # P <- (I - K H) P
        identity = np.identity(self.dimensions)
        self.P = np.dot(
            np.subtract(identity, np.dot(self.kalmanGain, self.H)),
            self.P)
| [
"connorlee77@gmail.com"
] | connorlee77@gmail.com |
29c0ecb0109c475fb98ef8b28a9127bc48b12693 | 83d7af883df9f83e987b6401fe9a57ec73cacac0 | /dynamic_programming/CtCI_8_1_triple_step/Solution.py | 7e450639436dd358480211033d3f6c5ba0ea4571 | [] | no_license | chopdev/leetcode_tasks | 93f3b807e3e33a2735cb9558dbe0f1b39d7c08f9 | 89aee5e9064e1bfe9873d26836453f0c7507bbc4 | refs/heads/master | 2023-08-28T04:46:01.534860 | 2023-08-27T08:17:15 | 2023-08-27T08:17:15 | 135,029,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | # Triple Step: A child is running up a staircase with n steps and can hop either 1 step, 2 steps, or 3
# steps at a time. Implement a method to count how many possible ways the child can run up the
# stairs.
### IMPORTANT: you need to care about int overflow, which has a limit 2^31 - 1
# complexity of pure recursive algorithm is ~ O(3^N)
# stair[i] = stair[i-1] + 1 + stair[i - 2] + 2 + stair[i - 3] + 3
# from stair[i-3] to stair[i] we have 3 ways we can go: do three 1 steps / do one 1 step + one 2 step / do one 3 step
# O(N) time and space
# my solution
def get_possibilities_count(n: int) -> int:
stair = [0]
stair.insert(1, 1)
stair.insert(2, 2)
stair.insert(3, 3)
for i in range(4, n + 1):
stair.insert(i, stair[i-1] + 1 + stair[i - 2] + 2 + stair[i - 3] + 3)
return stair[n]
# My solution
# O(1) space, O(N) time
def get_possibilities_count2222(n: int) -> int:
if n == 1: return 1
if n == 2: return 2
if n == 3: return 3
first = 1
second = 2
third = 3
res = 0
for i in range(4, n + 1):
res = first + 1 + second + 2 + third + 3
first = second
second = third
third = res
return res
######################################################################
print(get_possibilities_count(3))
print(get_possibilities_count(4))
print(get_possibilities_count2222(3))
print(get_possibilities_count2222(4)) | [
"taras.plavin@gmail.com"
] | taras.plavin@gmail.com |
aa0a9e73022a1268c8dc56985d5d5848748aa64e | 3fe272eea1c91cc5719704265eab49534176ff0d | /scripts/item/consume_2439898.py | fdc636b193089e8c5f0e75eb0dac9c8a17c50c85 | [
"MIT"
] | permissive | Bratah123/v203.4 | e72be4843828def05592298df44b081515b7ca68 | 9cd3f31fb2ef251de2c5968c75aeebae9c66d37a | refs/heads/master | 2023-02-15T06:15:51.770849 | 2021-01-06T05:45:59 | 2021-01-06T05:45:59 | 316,366,462 | 1 | 0 | MIT | 2020-12-18T17:01:25 | 2020-11-27T00:50:26 | Java | UTF-8 | Python | false | false | 217 | py | # Created by MechAviv
# Valentine Damage Skin | (2439898)
if sm.addDamageSkin(2439898):
sm.chat("'Valentine Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem() | [
"pokesmurfuwu@gmail.com"
] | pokesmurfuwu@gmail.com |
47131fde0464d479f5657ea59691d979ecfab11b | b13bb435d1b3e2a119ef9237bc8069e04ed4f95c | /src/algospot/Josephus.py | 7462569798f1bc8f31176fa92a10b1a6f07674ba | [] | no_license | beyond-algorithms/JaeEun | ebdaa5d9f4570b9d0eb57858c71d06e80f0fa4b4 | 4e990215fdb053defe9a52c3db43b05f946498de | refs/heads/master | 2020-03-22T17:56:17.285056 | 2018-10-02T18:01:35 | 2018-10-02T18:01:35 | 140,425,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from src.Test import Test as T
def main():
t = int(input().strip())
for _ in range(t):
N, K = list(map(int, input().strip().split()))
print(" ".join(list(map(str, solve(N, K)))))
def solve(N, K):
alive = N
p = [x for x in range(N)]
idx = 0
while alive > 2:
p.pop(idx)
idx = (idx + K - 1) % len(p)
alive -= 1
return list(map(lambda x: x + 1, p))
user_input = '''
2
6 3
40 3
'''
expected = '''
3 5
11 26
'''
T.runningTest(user_input.strip(), expected.lstrip(), main)
| [
"lette1394+github@gmail.com"
] | lette1394+github@gmail.com |
b7b150a6decc3c4f7dbb7a3c6dc7588a590a4a7e | 2278a6b53aa468e6d95bb1d7222d06af59b99585 | /assignments/assignment-one/workers/insight_worker/setup.py | d22767b133043993518fedf5812cbd5f5a60ecb1 | [
"LicenseRef-scancode-dco-1.1",
"MIT"
] | permissive | ChaseWhite44/cwwtzc | f51db97e72baf3cba063b464938679b4a47065ac | 0fe496c3bc1b9f15a190e47ba82deede7b875a0f | refs/heads/master | 2022-12-15T12:32:25.827200 | 2020-05-19T03:19:55 | 2020-05-19T03:19:55 | 235,884,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="insight_worker",
version="0.0.2",
url="https://github.com/chaoss/augur",
license='MIT',
author="Augur Team",
author_email="s@goggins.com",
description="Augur Worker that discovers and stores data anomalies",
long_description=read("README.rst"),
packages=find_packages(exclude=('tests',)),
install_requires=['flask', 'numpy', 'requests', 'psycopg2-binary', 'click', 'scipy'],
entry_points={
'console_scripts': [
'insight_worker_start=insight_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
)
| [
"noreply@github.com"
] | noreply@github.com |
26694f9780833902f81f8de8c55371feb6a78908 | 24171ea136e2ec211792d1d7644cd5c945a6df35 | /test/52.py | 04d1a38f0f844e2e4b3efaaac148680de7664d2b | [] | no_license | reidevries/codecoach | 1329ab367dc8aa3f3dd76af0b7cbc975a7d67ccd | a6d8e3cf28a6d264b0aa6aa8a44cc315803954b2 | refs/heads/master | 2021-05-23T10:02:53.403344 | 2020-04-05T12:57:24 | 2020-04-05T12:57:24 | 253,233,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | import libtcodpy as libtcod
import conf
import entity
import tile
import map
import camera
class Mode(object):
    "Mode is an abstract class used to allow the developer to implement parts of a new Mode subclass without needing placeholder methods and fields."
    def __init__(self, owner):
        # Base state shared by all modes: the entities updated/drawn each frame.
        # (owner is accepted for subclass use; the base class does not store it.)
        self.entities = []
    def update(self, key):
        # Per-frame logic hook; no-op by default, subclasses override.
        pass
    def draw(self):
        # Render hook; no-op by default.
        pass
    def erase(self):
        # Undraw/cleanup hook; no-op by default.
        pass
    def isBlocked(self, x, y):
        # Collision query; the base mode blocks nothing.
        return False
class DungeonMode(Mode):
    # Playable dungeon game state: owns the map, the player entity and the
    # off-screen libtcod console everything is drawn onto.
    def __init__(self, owner):
        super(DungeonMode, self).__init__(owner)
        # Off-screen console; draw() blits it to the root console (id 0).
        self.con = libtcod.console_new(conf.SCREEN_WIDTH, conf.SCREEN_HEIGHT)
        self.player = entity.Player(self, conf.GAME_WIDTH / 2, conf.GAME_HEIGHT / 2)
        self.entities.append(self.player)
        self.makeMap()
    def makeMap(self):
        # 50x50 grid filled entirely with brick walls.
        # NOTE(review): presumably a placeholder until real dungeon
        # generation exists -- confirm.
        self.map = map.Map([[tile.BrickWall(self) for y in xrange(50)] for x in xrange(50)])
    def update(self, key):
        # Keep the camera locked onto the player, then process input.
        camera.Camera.X = self.player.x
        camera.Camera.Y = self.player.y
        self.handleKeys(key)
    def draw(self):
        self.map.draw(self.con)
        self.player.draw(self.con)
        # Blit the window around the camera position to the root console.
        # NOTE(review): x uses GAME_WIDTH but y uses SCREEN_HEIGHT -- looks
        # inconsistent; confirm which pair of constants is intended.
        x = camera.Camera.X - (conf.GAME_WIDTH / 2)
        y = camera.Camera.Y - (conf.SCREEN_HEIGHT / 2)
        libtcod.console_blit(self.con, x, y, conf.SCREEN_WIDTH, conf.SCREEN_HEIGHT, 0, 0, 0)
    def erase(self):
        self.player.erase()
    def handleKeys(self, key):
        # Arrow keys move (or attack into) the four cardinal directions.
        if libtcod.console_is_key_pressed(libtcod.KEY_UP):
            self.player.moveAttack(0, -1)
        elif libtcod.console_is_key_pressed(libtcod.KEY_DOWN):
            self.player.moveAttack(0, 1)
        elif libtcod.console_is_key_pressed(libtcod.KEY_LEFT):
            self.player.moveAttack(-1, 0)
        elif libtcod.console_is_key_pressed(libtcod.KEY_RIGHT):
            self.player.moveAttack(1, 0)
| [
"raeaw@localhost.localdomain"
] | raeaw@localhost.localdomain |
5e6d226ff5ed4743fee68756755e8774934afc02 | 69c1e3b0fcb12839c820cff744bf492546b2dbf7 | /filehandling.py | ec4e81fd304928b9e3c98270367f478c2145b501 | [] | no_license | 260734/260734 | 71d844ae3032a762d8a96f3052df3a7ba4b0f844 | 40c02327ff82ad89d3583595913460f245f5ca77 | refs/heads/master | 2023-04-17T08:20:42.848803 | 2021-04-26T19:50:39 | 2021-04-26T19:50:39 | 359,469,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | f=open("e:/evening.txt", 'r')
data=f.read()
print(data)
f.close()
f=open("e:/evening.txt", 'r')
lines=f.readlines()
print(lines)
for line in lines:
print(line, end='')
f.close()
f=open("e:/evening.txt", 'r')
line1=f.readline()
print(line1)
line2=f.readline()
print(line2)
line3=f.readline()
print(line3)
f.close() | [
"sanskarsinha1999@gmail.com"
] | sanskarsinha1999@gmail.com |
b7e79f735e80d330e67e51a22912765914d29558 | ef484abfa738d555758f6c4a99afe8afaa0057f8 | /2019/test/test_day6.py | 4282fccec89a1923f67e84dadc50590e0cde997e | [] | no_license | erik-hedlund/AdventOfCode | 2ae2cd0a1e9176799cac75742555f1123fafa16b | c3d6f34be38d53cb5429da37591796b4137362ef | refs/heads/master | 2020-09-25T16:27:38.946015 | 2020-01-10T10:43:36 | 2020-01-10T10:43:36 | 226,043,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | import day6
def fake_input_data() -> dict:
input_file = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J', 'J)K', 'K)L', 'K)YOU', 'I)SAN']
orbits_map = dict()
for line in input_file:
key_value = line.strip().split(')')
orbits_map[key_value[1]] = key_value[0]
return orbits_map
def test_solution_part_two():
assert day6.solution_part_two(fake_input_data()) == 4
| [
"erik.markus.hedlund@gmail.com"
] | erik.markus.hedlund@gmail.com |
0877b42f8d0eff730ebc104cda8d3240bcaae07b | c805d53234826b0796ac20312d880d572b21d22f | /crosshair/examples/PEP316/bugs_detected/showcase.py | 1d77aa1c5e77f47cc3273e0739fec834568d309c | [
"MIT"
] | permissive | JinghanCode/Crosshair-Hypothesis | 3f1d1bf4d49506521ff0f94fc08c1cd4316aa295 | cbad2586fb797b2fa2326e5a9f550c6d56810f2e | refs/heads/master | 2023-04-30T22:09:16.683040 | 2021-05-03T02:28:24 | 2021-05-03T02:28:24 | 347,983,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from typing import Sequence, Dict, TypeVar, List, Tuple, Callable
T = TypeVar("T")
def list_to_dict(s: Sequence[T]) -> Dict[T, T]:
    """
    post: len(__return__) == len(s)
    # False; CrossHair finds a counterexample with duplicate values in the input.
    """
    # Docstring above is a live CrossHair contract -- do not edit it.
    return {element: element for element in s}
def consecutive_pairs(x: List[T]) -> List[Tuple[T, T]]:
    """
    post: len(__return__) == len(x) - 1
    # False (on an empty input list)
    """
    # Docstring above is a live CrossHair contract -- do not edit it.
    return list(zip(x, x[1:]))
def higher_order(fn: Callable[[int], int]) -> int:
    """
    Crosshair can find models for pure callables over atomic types.
    post: _ != 42
    # False (when given something like lambda a: 42 if (a == 0) else 0)
    """
    # Docstring above is a live CrossHair contract -- do not edit it.
    intermediate = fn(100)
    return fn(intermediate)
def append_fourtytwo_to_each(lists: List[List[int]]):
    """
    post: all(len(x) == len(__old__.lists[i]) + 1 for i, x in enumerate(lists))
    # False when two elements of the input are the SAME list!
    """
    # Docstring above is a live CrossHair contract -- do not edit it.
    for inner_list in lists:
        inner_list.append(42)
| [
"noreply@github.com"
] | noreply@github.com |
0e1a4fcb7ffab78f777f35a211da27a75bddde83 | 637bc1e67be832b40e1b478b71aa053a6cbf4f00 | /ecommerce/addresses/migrations/0001_initial.py | d06dc113df13b62387aec33c6ab4ca7fdddcf17b | [] | no_license | shohag000/e-commerce | 0d96368b4244ac52fbea66d11593513e4ea25734 | ab32c07f36b7cc742be4def2fc70500b1a7ca50f | refs/heads/master | 2020-04-22T23:06:07.402629 | 2019-02-14T17:31:54 | 2019-02-14T17:31:54 | 170,729,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | # Generated by Django 2.0.5 on 2019-02-06 05:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('billing', '0002_auto_20190204_1314'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_type', models.CharField(choices=[('billing', 'Billing'), ('shipping', 'Shipping')], max_length=120)),
('address_line_1', models.CharField(max_length=120)),
('address_line_2', models.CharField(blank=True, max_length=120, null=True)),
('city', models.CharField(max_length=120)),
('country', models.CharField(default='Bangladesh', max_length=120)),
('state', models.CharField(max_length=120)),
('postal_code', models.CharField(max_length=120)),
('billing_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='billing.BillinigProfile')),
],
),
]
| [
"mrshohagislam@yahoo.com"
] | mrshohagislam@yahoo.com |
d1fb9c3f72c4b54a1b092716c302151a59831956 | 22169449dc27ae9f4f64340b4aa9819b355daac4 | /browser/migrations/0035_auto_20170107_0636.py | 7767a2f20a4814a7ed2e443485debfcf25b40bf2 | [
"MIT"
] | permissive | MRCIEU/melodi | 7382a5584fdc1420744eb5a1d3a75016fdc6a189 | 876c9f5222f3d46cedfdcdbb6c091ec5d559a490 | refs/heads/master | 2021-07-07T06:44:15.542037 | 2021-06-10T21:36:27 | 2021-06-10T21:36:27 | 84,560,238 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('browser', '0034_auto_20161216_0811'),
]
operations = [
#migrations.AddField(
# model_name='compare',
# name='share',
# field=models.BooleanField(default=False),
#),
migrations.AlterField(
model_name='searchset',
name='ss_desc',
field=models.CharField(max_length=5000),
),
]
| [
"be15516@bristol.ac.uk"
] | be15516@bristol.ac.uk |
682039f30aaa220caa90f937bbaf5bd7075dd986 | fad752f7e4ae9c9fae7a472634a712249fb6f83f | /sato/cli.py | 9697a09e053b96555f2b63cdabb75bc724fcc61c | [
"Apache-2.0"
] | permissive | VIDA-NYU/sato | 895da0de833681335ec5122c4487555d2285f351 | 8fb51787b36114df13f54c1acd11df12a66ad3e4 | refs/heads/master | 2021-07-13T16:55:53.621521 | 2020-11-26T01:01:07 | 2020-11-26T01:01:07 | 225,955,500 | 0 | 0 | Apache-2.0 | 2019-12-04T20:56:16 | 2019-12-04T20:56:15 | null | UTF-8 | Python | false | false | 2,252 | py | import click
import pandas as pd
from sato.predict import evaluate
@click.command('predict')
@click.option('-n', '--count', default=1000, help='Sample size')
@click.argument(
    'src',
    nargs=-1,
    type=click.Path(file_okay=True, dir_okay=False, exists=True)
)
def run_predict(count, src):
    """Predict column types for CSV file(s)."""
    # Suffix -> (compression, delimiter). Only these four forms are
    # recognized: '.gz' implies gzip, '.tsv' implies tab-separated.
    known_formats = (
        ('.csv.gz', 'gzip', ','),
        ('.tsv.gz', 'gzip', '\t'),
        ('.csv', None, ','),
        ('.tsv', None, '\t'),
    )
    for filename in src:
        for suffix, compression, delimiter in known_formats:
            if filename.endswith(suffix):
                break
        else:
            # Unknown suffix aborts the whole command (raised outside the
            # per-file try block below, matching prior behavior).
            raise ValueError('unrecognized file format')
        try:
            df = pd.read_csv(
                filename,
                delimiter=delimiter,
                compression=compression,
                low_memory=False
            )
            rows = df.shape[0]
            print('\n{}'.format(filename))
            print('{}'.format('-' * len(filename)))
            if rows == 0:
                # Nothing to predict for an empty file.
                continue
            if rows > count:
                # Large files are down-sampled (fixed seed, reproducible).
                df = df.sample(n=count, random_state=1)
            # Run the model to obtain one predicted label per column.
            labels = evaluate(df)
            for i in range(len(df.columns)):
                print('%s: %s' % (df.columns[i], labels[i]))
        except Exception as ex:
            # Per-file failures are reported but do not stop the batch.
            print('error {}'.format(ex))
@click.group()
def cli(): # pragma: no cover
    """Command line interface for SATO."""
    pass
# Register subcommands on the group; invoked as `... predict <files>`.
cli.add_command(run_predict)
| [
"heiko.muller@gmail.com"
] | heiko.muller@gmail.com |
552428aca5f47dea4152ffc96235d45c2afd1431 | 57e88a8f99a19ab862c646ecd863e9327f7630c6 | /Algo/InterviewBit/colorful-number.py | c18692e9f6587c11112a547184ce308f6fe402f1 | [] | no_license | ayushgoel/PythonScripts | a1b7983f36160d85e4cfe8f28ddbc1d202a2bbd0 | 65817a31f3081f860bab79cee4acfbe40f964fed | refs/heads/master | 2021-06-07T06:44:29.493175 | 2020-05-29T06:01:37 | 2020-05-29T06:01:37 | 3,824,558 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # https://www.interviewbit.com/problems/colorful-number/
class Solution:
# @param A : integer
# @return an integer
def colorful(self, a):
stra = str(a)
all_products = set()
for l in xrange(1, len(stra)+1):
# print(l)
for i in xrange(len(stra)-l+1):
subsa = stra[i:i+l]
subsa_int = [int(j) for j in subsa]
subsa_multiply_ans = reduce(lambda x,y: x*y, subsa_int)
if subsa_multiply_ans in all_products:
return 0
all_products.add(subsa_multiply_ans)
# print("X",subsa)
return 1
s = Solution()
print(s.colorful(23))
| [
"ayushgoel111@gmail.com"
] | ayushgoel111@gmail.com |
861e5551a2470099b1e93fb1307fa9a1827e5f14 | f891b3538555b2f56ea3c1d5f374429e436b7406 | /bj1343.py | ea570fd7239dc18968c7fbb4cf8f262d0bebb930 | [] | no_license | PulseRoot/Baekjoon_Tier_Silver | 0b5f1c6384563b19f7739bdf60cb27b9f546ff37 | ab31de91a4ecb1aa7ef425d8a8f3013cdebfcaab | refs/heads/master | 2023-08-16T20:03:13.055627 | 2021-09-25T17:30:47 | 2021-09-25T17:30:47 | 402,993,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #폴리오미노
x = input() #X와 . 입력
y = ""
chk = 0
while(True):
if len(x) == 0:
break
if len(x) == 1:
chk = -1
print(chk)
break
if x[0] == ".":
while(x[0] == "."):
x = x[1:]
y = y + "."
if x[0:2] == "XX":
if x[0:4] == "XXXX":
x = x[4:]
y = y + "A" * 4
elif x[0:7] == "XXXXXX":
x = x[7:]
y = y + "A" * 4 + "B" * 2
else:
x = x[2:]
y = y + "B" * 2
else:
chk = -1
print(chk)
break
if chk == 0:
print(y)
| [
"noreply@github.com"
] | noreply@github.com |
acc0cbbbbef590f361a5a6744807f18458d0e078 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/130/usersdata/228/34476/submittedfiles/al8.py | 99d23561646b83280774cd80f4ab4ad83803ccaf | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # -*- coding: utf-8 -*-
n=int(input('digite um valor:')
nfat=1
for i in range(2,n+1):
nfat=nfat+i
print(nfat)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8bb467d7c526fcb224daf9d8607a15ba948076b6 | 06cdb84cd4ff12f9734194eae794bc6381c36bc6 | /ModuleSystem/ID_mission_templates.py | d27e9f5afc8155443669ce08e1f1fe8d93272f5f | [] | no_license | khamukkamu/HYW-1.66-Kham | 25d2a0a5786221af3414174756334af6878198ac | 586ae7bd37fb52ee09fa8081cc520dc36a711fa7 | refs/heads/master | 2021-08-16T02:10:07.565299 | 2018-03-03T19:43:04 | 2018-03-03T19:43:04 | 115,287,214 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,102 | py | mt_town_default = 0
mt_conversation_encounter = 1
mt_town_center = 2
mt_village_center = 3
mt_bandits_at_night = 4
mt_village_training = 5
mt_visit_town_castle = 6
mt_back_alley_kill_local_merchant = 7
mt_back_alley_revolt = 8
mt_lead_charge = 9
mt_village_attack_bandits = 10
mt_village_raid = 11
mt_besiege_inner_battle_castle = 12
mt_besiege_inner_battle_town_center = 13
mt_castle_attack_walls_defenders_sally = 14
mt_castle_attack_walls_belfry = 15
mt_castle_attack_walls_ladder = 16
mt_castle_visit = 17
mt_training_ground_trainer_talk = 18
mt_training_ground_trainer_training = 19
mt_training_ground_training = 20
mt_sneak_caught_fight = 21
mt_ai_training = 22
mt_camera_test = 23
mt_arena_melee_fight = 24
mt_arena_challenge_fight = 25
mt_duel_with_lord = 26
mt_wedding = 27
mt_tutorial_training_ground = 28
mt_tutorial_1 = 29
mt_tutorial_2 = 30
mt_tutorial_3 = 31
mt_tutorial_3_2 = 32
mt_tutorial_4 = 33
mt_tutorial_5 = 34
mt_quick_battle_battle = 35
mt_quick_battle_siege = 36
mt_multiplayer_dm = 37
mt_multiplayer_tdm = 38
mt_multiplayer_hq = 39
mt_multiplayer_cf = 40
mt_multiplayer_sg = 41
mt_multiplayer_bt = 42
mt_multiplayer_fd = 43
mt_bandit_lair = 44
mt_alley_fight = 45
mt_meeting_merchant = 46
mt_town_fight = 47
mt_snow_campement = 48
mt_foret_campement = 49
mt_campagne_campement = 50
mt_plain_campement = 51
mt_town_quartiers = 52
mt_town_quartiers_roeun_quest = 53
mt_town_quartiers_bourges = 54
mt_town_quartiers_bourges_dumy = 55
mt_town_quartiers_rennes_quest = 56
mt_quartiers_bourges_taverntl = 57
mt_t_quartiers_bourges_entrepots = 58
mt_t_quartiers_battle_entrepots = 59
mt_town_paris_ecurie = 60
mt_cata_district_entry = 61
mt_abbayes_generiques = 62
mt_foret_generique = 63
mt_tour_pins = 64
mt_riviere_star_battle = 65
mt_riviere_battle = 66
mt_town_variation_quartiers = 67
mt_bataill_etp_metz = 68
mt_entrepots_metz = 69
mt_pf_bourg = 70
mt_pf_bovictoire = 71
mt_catacomb_crypte = 72
mt_trois_sirenn = 73
mt_pugilatt = 74
mt_catacomb_reserv = 75
mt_catacomb_attk_crypte = 76
mt_mat_farmer = 77
mt_campachass = 78
mt_manoir_assasiin = 79
mt_town_caserne = 80
mt_lyon_caserne = 81
mt_town_vieuxchateau = 82
mt_scene_debut = 83
mt_scene_pirates = 84
mt_chateau_ducale = 85
mt_chateau_ducale_arba = 86
mt_chateau_ducale_epee = 87
mt_english_campement_atack = 88
mt_toul_vil = 89
mt_foret_verzycamp = 90
mt_fierbois_cat = 91
mt_chapelle_interior = 92
mt_foretrouen = 93
mt_foret_braconier = 95
mt_loge_secrete = 96
mt_monatere_orl2 = 97
mt_monatere_orl = 98
mt_rennes_chateau_chat = 99
mt_grotte_rennes_chat = 100
mt_rennes_chateau_interieur = 101
mt_loge_rouen = 102
mt_joutes = 103
mt_joutes_eng = 104
mt_joutes_brg = 105
mt_joutes_bret = 106
mt_joutes_a_pied = 107
mt_joutes_a_pied_eng = 108
mt_joutes_a_pied_brg = 109
mt_joutes_a_pied_bret = 110
mt_training_joutes = 111
mt_auberges1 = 112
mt_auberges2 = 113
mt_auberges3 = 114
mt_auberges4 = 115
mt_auberges_int_1 = 116
mt_auberges_int_2 = 117
mt_auberges_int_3 = 118
mt_auberges_int_4 = 119
mt_maison_auberge_quete = 120
mt_prairie_bourge = 121
mt_rpg_forest1 = 122
mt_rpg_forest2 = 123
mt_rpg_forest3 = 124
mt_rpg_forest4 = 125
mt_rpg_forest5 = 126
mt_rpg_forest5coupe_gorge = 127
mt_rpg_forest6 = 128
mt_rpg_forest6portelac = 129
mt_rpg_forest7 = 130
mt_rpg_forest8 = 131
mt_rpg_forest9 = 132
mt_rpg_forest9quete_terrier = 133
mt_rpg_forest10 = 134
mt_rpg_forest10aventurier = 135
mt_rpg_forest11 = 136
mt_rpg_forest11artus = 137
mt_maison_pilotis = 138
mt_mine_fer_broceliand_lvl_1 = 139
mt_mine_fer_broceliand_lvl_2 = 140
mt_mine_fer_broceliand_lvl_3 = 141
mt_grotte_bandits_broceliand_lvl_1 = 142
mt_grotte_bandits_broceliand_lvl_2 = 143
mt_grotte_bandits_sortie = 144
mt_broceliand_auberge_inter = 145
mt_rpg_auberge_mine_liberee = 146
mt_bois_descerfs = 147
mt_auberge_foret_eneigee = 148
mt_camp_crane_defer = 149
mt_camp_crane_deferdial = 150
mt_chat_panthievre = 151
mt_grotte_vers_panth = 152
mt_penthievre_chat_cour = 153
mt_multiplayer_duel = 154
mt_freelancer_training = 155
mt_freelancer_charge = 156
mt_arena_challenge_fight_weapons = 157
mt_arena_challenge_fight_fists = 158
mt_scene_chooser = 159
| [
"kham0214@gmail.com"
] | kham0214@gmail.com |
0c9b7cc93bca5fbfdde30478125a1eab317ecbb9 | 098507ab558c1a22479bbc13902906760dd7f25e | /logtoes/utils/api_utils.py | b487d61750fcad919f1dad74675eb24455b743f4 | [
"MIT"
] | permissive | adhorn/logtoes | 0800b56d1a6b9c321ffd1800d9d7550f62a958c5 | 41f6029e928cb5cdd2753e9c42e67fd1e56f5259 | refs/heads/master | 2023-02-22T10:01:49.110772 | 2022-04-13T17:39:43 | 2022-04-13T17:39:43 | 92,207,489 | 48 | 5 | MIT | 2023-02-15T21:34:32 | 2017-05-23T18:41:14 | Python | UTF-8 | Python | false | false | 4,361 | py | from flask import current_app, g, request, jsonify
from functools import wraps
import json
from datetime import datetime
from logtoes.celery import tasks
def api_error_response(code=404, message="Resource not found", errors=None):
    """Build a JSON error response with a consistent envelope.

    :param code: HTTP status code to return (also echoed in the body).
    :param message: human-readable error message.
    :param errors: optional list of detailed error entries.
    :returns: a Flask JSON response with ``success=False``.
    """
    # The previous signature used a mutable default (errors=list()), which
    # is evaluated once at definition time and shared between all calls.
    if errors is None:
        errors = []
    response = jsonify(
        dict(code=code, message=message, errors=errors, success=False))
    response.status_code = code
    return response
def bad_json_error_response():
    """Return the standard 400 response for an unparseable JSON body."""
    return api_error_response(code=400, message="Please provide valid JSON.")
class ModelJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands model objects and datetimes.

    Objects exposing a ``to_dict`` method are serialized through it;
    ``datetime`` instances become millisecond-precision strings.
    """

    def default(self, obj):
        to_dict = getattr(obj, 'to_dict', None)
        if to_dict is not None:
            return to_dict()
        if isinstance(obj, datetime):
            # strftime yields microseconds; trim the last three digits
            # down to millisecond precision.
            stamp = obj.strftime('%Y-%m-%d %H:%M:%S.%f')
            return stamp[:-3]
        return super().default(obj)
def json_response(fn):
    """Decorator serializing a view's return value into a JSON response."""
    @wraps(fn)
    def wrapped(*args, **kwargs):
        payload = fn(*args, **kwargs)
        # Already a Flask response object: pass it through untouched.
        if isinstance(payload, current_app.response_class):
            return payload
        # Wrap bare sequences so the JSON root is always an object.
        if isinstance(payload, (list, tuple)):
            payload = {'items': payload}
        body = json.dumps(payload, cls=ModelJSONEncoder)
        return current_app.response_class(body, mimetype='application/json')
    return wrapped
def log_entry(fn):
    """Decorator that records one log entry per API request.

    For each call it queues two Celery tasks (``prep_to_elk`` and
    ``prep_to_firehose``) with slightly different payload key styles
    (spaced keys vs underscored keys) and also writes a debug line to the
    app logger.  The wrapped view's result is returned unchanged.
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        result = fn(*args, **kwargs)
        status_code = result.status_code
        # Resolve the acting user: anonymous requests log as "guest".
        auth_user = "guest"
        if request.authorization:
            auth_user = request.authorization.username
            # Password 'unused' marks token auth: the "username" field then
            # carries an auth token that resolves to the real username.
            if request.authorization.password == 'unused':
                auth_user = g.user.verify_auth_token(
                    request.authorization.username).username
        # Prefer the forwarded host header (proxy setups); fall back to the
        # socket address.  Only the first entry of a comma list is kept.
        ip_address = request.environ.get('HTTP_X_FORWARDED_HOST')
        if not ip_address:
            ip_address = request.environ.get('REMOTE_ADDR', '0.0.0.0')
        if ip_address:
            ip_address = ip_address.split(',')[0]
        # Payload for the 'api_requests' index (spaced keys, @timestamp).
        data = {
            'user': '{}'.format(auth_user),
            'request': '{0} {1}'.format(request.method, request.path),
            'status code': '{}'.format(status_code),
            'query': '{}'.format(request.query_string),
            'ip': '{}'.format(ip_address),
            'agent': '{0} | {1} {2}'.format(
                request.user_agent.platform,
                request.user_agent.browser,
                request.user_agent.version),
            'raw agent': '{}'.format(request.user_agent.string),
            '@timestamp': datetime.utcnow().strftime(
                "%Y-%m-%dT%H:%M:%S")
        }
        tasks.prep_to_elk.delay(data, 'api_requests')
        # Second payload uses underscored keys and a space-separated
        # datetime; note datetime.utcnow() is evaluated again here, so the
        # two timestamps can differ by a tick.
        data_firehose = {
            'user': '{}'.format(auth_user),
            'request': '{0} {1}'.format(request.method, request.path),
            'status_code': '{}'.format(status_code),
            'query': '{}'.format(request.query_string),
            'ip': '{}'.format(ip_address),
            'agent': '{0} | {1} {2}'.format(
                request.user_agent.platform,
                request.user_agent.browser,
                request.user_agent.version),
            'raw_agent': '{}'.format(request.user_agent.string),
            'datetime': datetime.utcnow().strftime(
                "%Y-%m-%d %H:%M:%S")
        }
        tasks.prep_to_firehose.delay(data_firehose, 'Data')
        # Human-readable copy for the local debug log.
        current_app.logger.debug(
            """
            User: {user}
            Request: {method} {path}
            Code: {status_code}
            Query: {query}
            IP: {ip}
            Agent: {agent_platform} | {agent_browser} {agent_browser_version}
            Raw Agent: {agent}
            Time: {time}
            Args: {args}
            Kwargs: {kwargs}
            """.format(method=request.method,
                       query=request.query_string,
                       status_code=status_code,
                       path=request.path,
                       ip=ip_address,
                       agent_platform=request.user_agent.platform,
                       agent_browser=request.user_agent.browser,
                       agent_browser_version=request.user_agent.version,
                       agent=request.user_agent.string,
                       user=auth_user,
                       time=datetime.utcnow(),
                       args=dict(request.args or []),
                       kwargs=dict(**kwargs or {}),
                       )
            )
        return result
    return wrapped
| [
"adhorn@amazon.com"
] | adhorn@amazon.com |
8e9f1d89a0a10175a73f79346baaea3a012c4479 | 3a5ea75a5039207104fd478fb69ac4664c3c3a46 | /vega/algorithms/nas/modnas/estim/dist_backend/base.py | 1725fd222057fa4b91024747947592087e159828 | [
"MIT"
] | permissive | fmsnew/vega | e3df25efa6af46073c441f41da4f2fdc4929fec5 | 8e0af84a57eca5745fe2db3d13075393838036bb | refs/heads/master | 2023-06-10T04:47:11.661814 | 2021-06-26T07:45:30 | 2021-06-26T07:45:30 | 285,174,199 | 0 | 0 | MIT | 2020-08-11T14:19:09 | 2020-08-05T03:59:49 | Python | UTF-8 | Python | false | false | 1,712 | py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Distributed remote client and server."""
import threading
class RemoteBase():
    """Client-side handle for asynchronous remote procedure calls.

    Subclasses implement :meth:`rpc` and :meth:`close`; :meth:`call` runs
    the RPC on a background thread and reports the outcome through the
    callbacks registered for that call.
    """

    def __init__(self):
        super().__init__()
        # Per-call completion callbacks, installed by call().
        self.on_done = None
        self.on_failed = None

    def call(self, func, *args, on_done=None, on_failed=None, **kwargs):
        """Invoke *func* remotely on a worker thread with callbacks."""
        self.on_done = on_done
        self.on_failed = on_failed
        worker = threading.Thread(
            target=self.rpc, args=(func,) + args, kwargs=kwargs)
        self.th_rpc = worker
        worker.start()

    def close(self):
        """Close the remote client."""
        raise NotImplementedError

    def rpc(self, func, *args, **kwargs):
        """Call function on remote client."""
        raise NotImplementedError

    def on_rpc_done(self, ret):
        """Record the result and fire the success callback."""
        self.ret = ret
        self.on_done(ret)

    def on_rpc_failed(self, ret):
        """Fire the failure callback with the error value."""
        self.on_failed(ret)
class WorkerBase():
    """Server-side base class for distributed remote workers."""

    def run(self, estim):
        """Run worker."""
        raise NotImplementedError

    def close(self):
        """Close worker."""
        raise NotImplementedError
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
dd7a3ac6d291dc2db98817190f8813c458576953 | 66dd570bf5945dcbd183ed3c0cf897c0359cbccd | /python/python语法/pyexercise/Exercise03_09.py | 4560a8df9de30b98aa5d9640c98b118b4dc4a3be | [] | no_license | SamJ2018/LeetCode | 302cc97626220521c8847d30b99858e63fa509f3 | 784bd0b1491050bbd80f5a0e2420467b63152d8f | refs/heads/master | 2021-06-19T10:30:37.381542 | 2021-02-06T16:15:01 | 2021-02-06T16:15:01 | 178,962,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # Obtain input
name = input("Enter employee's name: ")
# float() parses the numeric fields safely; the previous eval(input(...))
# would execute arbitrary code typed at the prompt.  (Integer inputs now
# echo as e.g. "40.0" instead of "40".)
hours = float(input("Enter number of hours worked in a week: "))
payRate = float(input("Enter hourly pay rate: "))
fedTaxWithholdingRate = float(input("Enter federal tax withholding rate: "))
stateTaxWithholdingRate = float(input("Enter state tax withholding rate: "))
# Gross pay and the two withholdings (rates are fractions, e.g. 0.2).
grossPay = hours * payRate
fedTaxWithholding = grossPay * fedTaxWithholdingRate
stateTaxWithholding = grossPay * stateTaxWithholdingRate
totalDeduction = fedTaxWithholding + stateTaxWithholding
netPay = grossPay - totalDeduction
# Build the report.  int(x * 100) / 100.0 truncates (not rounds) to cents,
# matching the original output format.
out = "Employee Name: " + name + "\n\n"
out += "Hours Worked: " + str(hours) + '\n'
out += "Pay Rate: $" + str(payRate) + '\n'
out += "Gross Pay: $" + str(grossPay) + '\n'
out += "Deductions:\n"
out += "  Federal Withholding (" + str(fedTaxWithholdingRate * 100) + \
    "%): $" + str(int(fedTaxWithholding * 100) / 100.0) + '\n'
out += "  State Withholding (" + str(stateTaxWithholdingRate * 100) + "%):" + \
    " $" + str(int(stateTaxWithholding * 100) / 100.0) + '\n';
out += "  Total Deduction:" + " $" + \
    str(int(totalDeduction * 100) / 100.0) + '\n'
out += "Net Pay:" + " $" + str(int(netPay * 100) / 100.0)
print(out)
| [
"juksam@centos7.localdomain"
] | juksam@centos7.localdomain |
f531d8e47a46f16095ff0a4522cfedaf5eca3518 | b8688a6c1824335808182768c3349624722abba6 | /uamqp/constants.py | 987bcaef27fd21d840f5b9e8ca36ca97fd73228c | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | gdooper/azure-uamqp-python | 65d64e19190921c16cc65947ddcb01f686cd4277 | 8a71c86c7598b439afea28f216a97437b3ebaaed | refs/heads/master | 2020-03-30T00:33:55.710726 | 2018-05-29T16:06:34 | 2018-05-29T16:06:34 | 150,530,862 | 0 | 0 | MIT | 2018-09-27T04:57:31 | 2018-09-27T04:57:31 | null | UTF-8 | Python | false | false | 3,876 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from enum import Enum
from uamqp import c_uamqp
# Wire-level and auth defaults.  Most values mirror constants exported by
# the c_uamqp extension so Python and C layers stay in sync.
DEFAULT_AMQPS_PORT = 5671
AUTH_EXPIRATION_SECS = c_uamqp.AUTH_EXPIRATION_SECS
AUTH_REFRESH_SECS = c_uamqp.AUTH_REFRESH_SECS
# AMQP descriptor / management byte literals.
STRING_FILTER = b"apache.org:selector-filter:string"
OPERATION = b"operation"
READ_OPERATION = b"READ"
MGMT_TARGET = b"$management"
MESSAGE_SEND_RETRIES = 3
BATCH_MESSAGE_FORMAT = c_uamqp.AMQP_BATCH_MESSAGE_FORMAT
MAX_FRAME_SIZE_BYTES = c_uamqp.MAX_FRAME_SIZE_BYTES
MAX_MESSAGE_LENGTH_BYTES = c_uamqp.MAX_MESSAGE_LENGTH_BYTES
# Lifecycle of an outgoing message.
class MessageState(Enum):
    WaitingToBeSent = 0
    WaitingForAck = 1
    Complete = 2
    Failed = 3
# Terminal states: a message in one of these needs no further processing.
DONE_STATES = (MessageState.Complete, MessageState.Failed)
# Receiver link state machine (mirrors c_uamqp receiver states).
class MessageReceiverState(Enum):
    Idle = c_uamqp.MESSAGE_RECEIVER_STATE_IDLE
    Opening = c_uamqp.MESSAGE_RECEIVER_STATE_OPENING
    Open = c_uamqp.MESSAGE_RECEIVER_STATE_OPEN
    Closing = c_uamqp.MESSAGE_RECEIVER_STATE_CLOSING
    Error = c_uamqp.MESSAGE_RECEIVER_STATE_ERROR
# Outcome of a single send operation.
class MessageSendResult(Enum):
    Ok = c_uamqp.MESSAGE_SEND_OK
    Error = c_uamqp.MESSAGE_SEND_ERROR
    Timeout = c_uamqp.MESSAGE_SEND_TIMEOUT
    Cancelled = c_uamqp.MESSAGE_SEND_CANCELLED
# Sender link state machine.
class MessageSenderState(Enum):
    Idle = c_uamqp.MESSAGE_SENDER_STATE_IDLE
    Opening = c_uamqp.MESSAGE_SENDER_STATE_OPENING
    Open = c_uamqp.MESSAGE_SENDER_STATE_OPEN
    Closing = c_uamqp.MESSAGE_SENDER_STATE_CLOSING
    Error = c_uamqp.MESSAGE_SENDER_STATE_ERROR
# Result of opening a management link.
class ManagementLinkState(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
    Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
    Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED
# Result of executing a management operation.
class ManagementOperationResult(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
    Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
    BadStatus = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
    Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED
# AMQP link role (sender vs receiver endpoint).
class Role(Enum):
    Sender = c_uamqp.ROLE_SENDER
    Receiver = c_uamqp.ROLE_RECEIVER
# AMQP sender settlement modes.
class SenderSettleMode(Enum):
    Unsettled = c_uamqp.SENDER_SETTLE_MODE_UNSETTLED
    Settled = c_uamqp.SENDER_SETTLE_MODE_SETTLED
    Mixed = c_uamqp.SENDER_SETTLE_MODE_MIXED
# AMQP receiver settlement modes.
class ReceiverSettleMode(Enum):
    PeekLock = c_uamqp.RECEIVER_SETTLE_MODE_PEEKLOCK
    ReceiveAndDelete = c_uamqp.RECEIVER_SETTLE_MODE_RECEIVEANDDELETE
# Claims-based-security (CBS) operation outcomes.
class CBSOperationResult(Enum):
    Ok = c_uamqp.CBS_OPERATION_RESULT_OK
    Error = c_uamqp.CBS_OPERATION_RESULT_CBS_ERROR
    Failed = c_uamqp.CBS_OPERATION_RESULT_OPERATION_FAILED
    Closed = c_uamqp.CBS_OPERATION_RESULT_INSTANCE_CLOSED
# CBS link open outcomes.
class CBSOpenState(Enum):
    Ok = c_uamqp.CBS_OPEN_COMPLETE_OK
    Error = c_uamqp.CBS_OPEN_COMPLETE_ERROR
    Cancelled = c_uamqp.CBS_OPEN_COMPLETE_CANCELLED
# CBS token/auth status values.
class CBSAuthStatus(Enum):
    Ok = c_uamqp.AUTH_STATUS_OK
    Idle = c_uamqp.AUTH_STATUS_IDLE
    InProgress = c_uamqp.AUTH_STATUS_IN_PROGRESS
    Timeout = c_uamqp.AUTH_STATUS_TIMEOUT
    RefreshRequired = c_uamqp.AUTH_STATUS_REFRESH_REQUIRED
    Expired = c_uamqp.AUTH_STATUS_EXPIRED
    Error = c_uamqp.AUTH_STATUS_ERROR
    Failure = c_uamqp.AUTH_STATUS_FAILURE
# Management execute outcomes (same C constants as ManagementOperationResult).
class MgmtExecuteResult(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
    Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
    Failed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
    Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED
# Management open outcomes (same C constants as ManagementLinkState).
class MgmtOpenStatus(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
    Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
    Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED
| [
"antisch@microsoft.com"
] | antisch@microsoft.com |
ffaac00428876e73eba0463a424ba4b297b64fa1 | f29491639e1f68426657063614fd7a161ea2977f | /urlshorter.py | 3d141786e6dbc2cbc9aad0dfa64db42992d7f892 | [] | no_license | zillion45/soruto | 8bd3c6e358dbfbed2b600cb89b3bad54a68815f2 | 243abf78c15451acd892afaab8e531e6c6fe94e6 | refs/heads/master | 2021-01-20T20:35:44.858391 | 2016-08-03T04:22:51 | 2016-08-03T04:22:51 | 64,813,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | # -*- coding: utf-8 -*-
import os
import string
import random
from datetime import datetime
from urlparse import urlparse
from flask import Flask, request, render_template, redirect, abort
from flask.ext.sqlalchemy import SQLAlchemy
# App setup: SQLite database lives next to this module as data.db.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.db')
db = SQLAlchemy(app)
class URLShortener(db.Model):
    """ORM model mapping a short code to an original URL."""
    __tablename__ = 'url'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Short code shown to users; generated randomly in __init__.
    code = db.Column(db.String(10), unique=True)
    url = db.Column(db.String(500), unique=True)
    created = db.Column(db.DateTime, default=datetime.now)
    visit_count = db.Column(db.Integer, default=0)
    def __init__(self, url):
        self.url = url
        # NOTE(review): no collision handling -- a duplicate random code
        # would violate the unique constraint on commit; confirm intended.
        code = self.get_code(5)
        self.code = code
    @staticmethod
    def get_code(size):
        # random.sample draws *without replacement*, so a code never
        # repeats a character.  Not cryptographically secure.
        return "".join(random.sample(string.ascii_letters + string.digits, size))
@app.route('/', methods=['GET', 'POST'])
def home():
    """Landing page: on POST, shorten the submitted URL and show the result."""
    short_url = ''
    if request.method == 'POST':
        original_url = request.form.get("url")
        # Default scheme-less input (e.g. "example.com") to http://.
        if urlparse(original_url).scheme == '':
            original_url = 'http://' + original_url
        # Reuse an existing mapping for the same URL instead of minting
        # a new code.
        tmp = URLShortener.query.filter_by(url=original_url).first()
        if tmp:
            url = tmp
        else:
            url = URLShortener(url=original_url)
            db.session.add(url)
            db.session.commit()
        short_url = request.host + '/' + url.code
    return render_template('home.html', short_url = short_url)
@app.route('/<string:code>')
def get_original_url(code):
    """Resolve a short code, count the visit, and redirect to the target."""
    original_url = URLShortener.query.filter_by(code=code).first()
    if not original_url:
        abort(404)
    # Track popularity before redirecting.
    original_url.visit_count += 1
    db.session.add(original_url)
    db.session.commit()
    return redirect(original_url.url)
if __name__ == '__main__':
    # Create tables on first run; debug server is for local use only.
    db.create_all()
    app.run(debug=True)
| [
"chungwing45@gmail.com"
] | chungwing45@gmail.com |
d41c69e29c794cbabb1c2e1f208a21b4bf0f2f48 | 0e8b6f94467c25dd2440f7e2ea1519244e689620 | /MarlinJobs/CalibrationConfigFiles/Stage27Config_5x5_30x30.py | 3435a6f9a3f6a73455fa0470d23dcbb790425599 | [] | no_license | StevenGreen1/HighEnergyPhotonAnalysis | 97a661eaca2efd00472f1969855c724c9d505369 | 8a82ac57f56aad5bdbe99d4a5afb771592bc1725 | refs/heads/master | 2021-01-10T14:08:50.550184 | 2015-10-12T12:43:47 | 2015-10-12T12:43:47 | 43,491,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Calibration config file for testing
# NOTE(review): a value of -1 appears to mark a constant as unused for this
# configuration (cf. CalibrHCal / CalibrECalMIP vs their positive siblings)
# -- confirm against the consumer of this config.
# Digitisation Constants - ECal
CalibrECal = 42.4326603502
# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 49.057884929
CalibrHCalEndcap = 54.1136311832
CalibrHCalOther = 29.2180288685
# Digitisation Constants NewLDCCaloDigi - HCal
CalibrHCal = -1
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = -1
CalibrHCalMIP = 0.0004925
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 158.73
HCalToMIPCalibration = 40.8163
MuonToMIPCalibration = 10.101
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00062269867
HCalToEMGeVCalibration = 1.00062269867
ECalToHadGeVCalibration = 1.08773337955
HCalToHadGeVCalibration = 1.04823493932
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0
# Timing ECal
ECalBarrelTimeWindowMax = 1000000.0
ECalEndcapTimeWindowMax = 1000000.0
# Timing HCal
HCalBarrelTimeWindowMax = 1000000.0
HCalEndcapTimeWindowMax = 1000000.0
| [
"sg1sg2sg3@hotmail.co.uk"
] | sg1sg2sg3@hotmail.co.uk |
91778635956550f14274b5f9b0e965db80fabb91 | 6874a3d16c941c897e245f0c2fc8d9db8442e770 | /app.py | 20d78d0de78564608a58c69de2a7f4be4358533f | [] | no_license | Kully/minty-budget-tool | fc91beac6ff96d238c4b5fe1091ae1a76b358d83 | e91b12e6be912f3e851a6b64e0a7268334375785 | refs/heads/master | 2023-03-23T07:37:20.014156 | 2021-03-18T23:06:57 | 2021-03-18T23:06:57 | 308,640,343 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,416 | py | import calendar
import datetime
import json
import os
import random
import dash
import dash_auth
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import flask
import plotly
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd
from dash.dependencies import Input, Output, State
from datetime import date
from datetime import datetime as dt
from flask import request
from layout import layout
from util.users import users_info
from util.constants import *
# Credential and display-name lookup tables keyed by username.
user_pwd, user_names = users_info()
APP_LOGO = os.path.join("assets", "zyphr-tight.png")  # logo on the login page
APP_TITLE = "Zyphr"
def Header(title):
    """Build the page header element for *title*."""
    heading = html.H1(style={"fontSize": 30}, children=title)
    return html.Div(
        style={"marginBottom": 0},
        children=[heading, html.Br()],
    )
def new_login(topnav):
    """Build the login page layout: nav bar, logo, title, and the POST form.

    The form posts to the Flask /login route handled by route_login().
    NOTE(review): the username/password inputs ship pre-filled with demo
    credentials ("bob" / "bob@123") -- remove before production.
    """
    return html.Div(
        id="login-page",
        children=[
            topnav,
            html.Div(
                html.Img(src=APP_LOGO, height="80px"),
            ),
            html.Div(
                Header("Sign in to Budget App"),
            ),
            dbc.Card(
                id="login-card",
                children=dbc.CardBody(
                    html.Form(
                        id="login-form",
                        action="/login",
                        method="POST",
                        children=[
                            dbc.Label("Username", html_for="login-username"),
                            dbc.Input(
                                id="username-input",
                                name="login-username",
                                type="text",
                                value="bob",
                            ),
                            html.Br(),
                            dbc.Label("Password", html_for="login-password"),
                            dbc.Input(
                                id="password-input",
                                name="login-password",
                                value="bob@123",
                                type="password",
                            ),
                            html.Br(),
                            dbc.Button(
                                "Login",
                                id="submit-button",
                                color="primary",
                                block=True,
                            ),
                        ]
                    )
                ),
                color="light",
            ),
        ],
    )
# old_login_page = html.Div([
# html.Div(topnav, id="top-nav"),
# login_form,
# dbc.Alert([
# html.H5("Log in credentials", className="alert-heading"),
# html.Span([
# html.Span("Usr "),
# html.B("bob", style=UNDERLINE_STYLE),
# html.Span(", Pwd "),
# html.B("bob@123 ", style=UNDERLINE_STYLE),
# ]),
# html.Br(),
# html.Span([
# html.Span("Usr "),
# html.B("sally", style=UNDERLINE_STYLE),
# html.Span(", Pwd "),
# html.B("sally@123 ", style=UNDERLINE_STYLE),
# ]),
# ],
# color="light",
# id="credentials-alert",
# dismissable=True,
# )
# ])
# Dash application: Minty theme, responsive viewport meta tag, and
# suppressed callback-exception checks (layout is generated per request).
app = dash.Dash(
    __name__,
    external_stylesheets=[dbc.themes.MINTY],
    suppress_callback_exceptions=True,
    meta_tags=[
        {"name": "viewport", "content": "width=device-width, initial-scale=1"}
    ]
)
server = app.server  # underlying Flask server, used for the auth routes below
@app.server.route("/login", methods=["POST"])
def route_login():
    """Validate posted credentials and start a session.

    On success, redirects to the app root with the username stored in the
    session cookie; on failure, redirects back to the login page.
    """
    import hmac  # local import: constant-time comparison helper

    data = flask.request.form
    username = data.get("login-username")
    password = data.get("login-password")
    known = user_pwd.get(username)
    # hmac.compare_digest avoids the timing side channel of a plain `!=`
    # on secret material.  NOTE(review): users_info() appears to supply
    # plain-text passwords -- consider storing hashes instead.
    if (known is None or password is None
            or not hmac.compare_digest(known.encode(), password.encode())):
        return flask.redirect("/login")
    rep = flask.redirect("/")
    rep.set_cookie("custom-auth-session", username)
    return rep
@app.server.route("/logout", methods=["POST"])
def route_logout():
    """Clear the session cookie and return to the login page."""
    response = flask.redirect("/login")
    # Expiring the cookie immediately ends the session.
    response.set_cookie("custom-auth-session", "", expires=0)
    return response
def string_date_to_date_object(dateString):
    """Parse a 'YYYY-MM-DD...' string into a ``datetime.date``.

    Anything after the first two characters of the day field (e.g. a
    trailing 'T12:30' time component) is ignored.
    """
    year_part, month_part, day_part = dateString.split("-")
    return date(int(year_part), int(month_part), int(day_part[:2]))
def filter_dataframe_between_two_datetimes(df_filter, start_date, end_date):
    """Return rows whose Date lies in the half-open interval (start, end]."""
    after_start = df_filter["Date"] > start_date
    not_after_end = df_filter["Date"] <= end_date
    return df_filter.loc[after_start & not_after_end]
def filter_dataframe_one_month(df_filter, yearMonth):
    """Return rows whose Date falls inside the given "YYYY-MM" month."""
    year_str, month_str = yearMonth.split("-")
    year, month = int(year_str), int(month_str)
    first = date(year, month, 1)
    last = date(year, month, num_of_days_in_month(year, month))
    in_month = (df_filter["Date"] >= first) & (df_filter["Date"] <= last)
    return df_filter.loc[in_month]
def budget_dataframe_to_hash(json_budget_df):
    """Deserialize a split-orient JSON budget table into {category: budget}.

    A later occurrence of a category overwrites an earlier one, matching
    the original row-by-row behaviour.
    """
    budget_df = pd.read_json(json_budget_df, orient='split')
    # dict(zip(...)) iterates the columns directly; the previous
    # iterrows() loop built a throwaway Series per row.
    return dict(zip(budget_df["Category"], budget_df["Budget"]))
def num_of_days_in_month(year, month):
    """Return the number of days in the given month.

    Accepts ints or numeric strings for both arguments, as the previous
    implementation did.  Uses the stdlib instead of date arithmetic with
    a hand-rolled December special case.
    """
    # calendar.monthrange returns (weekday_of_first_day, days_in_month).
    return calendar.monthrange(int(year), int(month))[1]
def sum_of_category_expense_in_month(df_filter, category, yearMonth="2020-8"):
    """Total spending for *category* during the "YYYY-MM" month, to 2 d.p."""
    in_category = df_filter[df_filter["Category"] == category]
    in_month = filter_dataframe_one_month(in_category, yearMonth)
    return round(in_month["Amount"].sum(), 2)
def sum_of_category_expense_between_dates(df_filter, category,
                                          start_date, end_date):
    """Total spending for *category* in (start_date, end_date], to 2 d.p."""
    in_category = df_filter[df_filter["Category"] == category]
    in_window = filter_dataframe_between_two_datetimes(
        in_category, start_date, end_date
    )
    return round(in_window["Amount"].sum(), 2)
# original login form
# login_form = dbc.Form(
# [
# dbc.FormGroup(
# [
# dbc.Label("Email", className="mr-2"),
# dbc.Input(type="email", placeholder="Enter email"),
# ],
# className="mr-3",
# ),
# dbc.FormGroup(
# [
# dbc.Label("Password", className="mr-2"),
# dbc.Input(type="password", placeholder="Enter password"),
# ],
# className="mr-3",
# ),
# dbc.Button("Submit", color="primary"),
# ],
# inline=True,
# )
# login_form = html.Div([
# dbc.Row([
# dbc.Col(
# width=12,
# children=[
# html.Form([
# dbc.Label("Username"),
# dbc.Input(placeholder="username", name="username",
# type="text", id="username-input", value="bob"),
# dbc.Label("Password"),
# dbc.Input(placeholder="password", name="password",
# type="password", id="password-input", value="bob@123"),
# html.Br(),
# html.Button("Login", className="btn btn-block btn-primary",
# type="submit", id="submit-button"),
# ], action="/login", method="post")
# ]
# ),
# ]),
# ], id="login-form-div")
# html.Form([
# dbc.Label("Username"),
# dbc.Input(placeholder="username", name="username",
# type="text", id="username-input", value="bob"),
# dbc.Label("Password"),
# dbc.Input(placeholder="password", name="password",
# type="password", id="password-input", value="bob@123"),
# html.Br(),
# html.Button("Login", className="btn btn-block btn-primary",
# type="submit", id="submit-button"),
# ], action="/login", method="post")
# ], id="login-form-div")
def load_budget_dataframe(session_cookie):
    """Load the per-user budget table from data/<username>_budget.csv."""
    return pd.read_csv("data/" + session_cookie + "_budget.csv")
def load_dataframe(filename):
    """Load a user's transaction CSV and attach a running Balance column.

    Income, credit-card payments, and outgoing e-transfers reduce the
    balance; every other category adds its Amount.  NOTE(review): the
    opening balance 7757.00 is hard-coded, and each row's Balance is the
    running balance *before* that row's amount is applied (balance_col[:-1])
    -- confirm this matches the CSV's row ordering.
    """
    df = pd.read_csv("data/" + filename + ".csv")
    # CSV dates arrive as MM/DD/YYYY strings; convert to datetime.date.
    df["Date"] = df["Date"].apply(
        lambda d: dt.strptime(d, "%m/%d/%Y").date()
    )
    balance_col = [7757.00]
    balance = balance_col[0]
    for idx, row in df.iterrows():
        if ("Income" in row["Category"] or
            row["Category"] == "Credit Card Payment"):
            balance -= row["Amount"]
        elif (row["Category"] == "Transfer" and
              row["Description"].startswith("E-TRANSFER")):
            balance -= row["Amount"]
        else:
            balance += row["Amount"]
        balance = round(balance, 2)
        balance_col.append(balance)
    df["Balance"] = balance_col[:-1]
    return df
def jsonified_data_to_dataframe(jsonified_data):
    """Rebuild the transactions frame from its split-orient JSON form.

    The "Date" column comes back as pandas timestamps; it is converted to
    plain ``datetime.date`` objects, which the filter helpers expect.
    """
    frame = pd.read_json(jsonified_data, orient='split')
    frame["Date"] = frame["Date"].apply(lambda stamp: stamp.date())
    return frame
def transactions_line_chart(dataframe, start_date, end_date, checklistInput):
    """Build the balance-over-time figure.

    Two traces: the full history as a dotted grey line, and the selected
    (start, end] window highlighted in green.  *checklistInput* toggles
    spline smoothing ("smooth") and grid lines ("toggleGrid").
    """
    df_filtered = filter_dataframe_between_two_datetimes(
        dataframe,
        start_date,
        end_date
    )
    # "spline" interpolates between points; None leaves straight segments.
    if "smooth" in checklistInput:
        shape = "spline"
    else:
        shape = None
    fig = go.Figure(data=[
        go.Scatter(
            name="all time",
            x=dataframe["Date"],
            y=dataframe["Balance"],
            marker=dict(color="#aaa"),
            line=dict(
                width=1,
                dash="dot",
                smoothing=True,
                shape=shape
            ),
        ),
        go.Scatter(
            name="spending",
            x=df_filtered["Date"],
            y=df_filtered["Balance"],
            marker=dict(color=GREEN_COLOR),
            line=dict(
                width=2,
                smoothing=True,
                shape=shape
            )
        ),
    ])
    showgrid = (True if "toggleGrid" in checklistInput else False)
    # fixedrange disables zoom/pan on both axes (mobile friendliness).
    fig.update_layout(
        margin=dict(l=10,r=10,t=10,b=10),
        plot_bgcolor=PLOT_BGCOLOR,
        xaxis=dict(fixedrange=True),
        yaxis=dict(fixedrange=True),
        hoverlabel=dict(
            bgcolor="#fafafa",
            font_size=13,
            bordercolor="#555",
        )
    )
    fig.update_xaxes(
        tickangle=-45,
        showspikes=True,
        spikecolor="#bababa",
        spikesnap="cursor",
        spikemode="across",
        spikethickness=1,
        spikedash="dot",
        gridcolor=GRID_COLOR,
        showgrid=showgrid,
        tickmode="array",
    )
    fig.update_yaxes(
        zeroline=False,
        gridcolor=GRID_COLOR,
        showgrid=showgrid,
    )
    return fig
def budget_bar_chart(dataframe, category_budget_hash, yearMonth="2020-08"):
    """Build the grouped horizontal bar chart of budget vs. actual spend.

    One grey bar per category for the budgeted amount and one green bar
    for the amount actually spent in *yearMonth*.
    """
    keys = list(category_budget_hash.keys())
    fig = go.Figure(data=[
        go.Bar(
            name="$ budget",
            y=keys,
            x=[category_budget_hash[k] for k in keys],
            marker=dict(color="#ccc"),
            orientation="h",
        ),
        go.Bar(
            name="$ spent",
            y=keys,
            x=[sum_of_category_expense_in_month(dataframe, k, yearMonth)
               for k in keys],
            marker=dict(color=GREEN_COLOR),
            orientation="h",
        ),
    ])
    fig.update_traces(marker_line_width=0, opacity=0.9)
    fig.update_layout(
        barmode="group", bargap=0.2, title="Budget Goals",
        margin=dict(l=0,r=0,t=40,b=0),
        plot_bgcolor=PLOT_BGCOLOR,
        xaxis=dict(fixedrange=True),
        yaxis=dict(fixedrange=True),
    )
    return fig
def insights_bar_chart(user_df, start_date_dt, end_date_dt,
                       transaction_category_array):
    """Build a horizontal bar chart of spending per category in a window.

    Categories whose total is zero or negative are omitted.
    """
    df_filter = filter_dataframe_between_two_datetimes(
        user_df, start_date_dt, end_date_dt
    )
    values = []
    labels = []
    for category in transaction_category_array:
        df_by_categ = df_filter[df_filter["Category"] == category]
        category_sum = round(df_by_categ["Amount"].sum(), 2)
        # Only positive totals make it onto the chart.
        if category_sum > 0:
            values.append(category_sum)
            labels.append(category)
    fig = go.Figure(data=[
        go.Bar(
            name="$",
            y=labels,
            x=values,
            marker=dict(color=GREEN_COLOR),
            orientation="h",
        ),
    ])
    fig.update_traces(marker_line_width=0, opacity=0.9)
    fig.update_layout(
        barmode="group",
        bargap=0.2,
        margin=dict(l=0,r=0,t=50,b=0),
        plot_bgcolor=PLOT_BGCOLOR,
        xaxis=dict(fixedrange=True),
        yaxis=dict(fixedrange=True),
    )
    return fig
def pie_chart_and_insights_card(dataframe, start_date, end_date,
                                transaction_category_array):
    """Build the spending pie chart and the insights-card text for a window.

    Returns a (figure, children) pair: the pie of positive per-category
    totals, and a list of Dash elements summarising spendings, income,
    and the most/least-spent categories between *start_date* and
    *end_date*.
    """
    df_filter = filter_dataframe_between_two_datetimes(
        dataframe, start_date, end_date
    )
    values = []
    labels = []
    label_value_hash = {}
    for category in transaction_category_array:
        df_by_categ = df_filter[df_filter["Category"] == category]
        category_sum = round(df_by_categ["Amount"].sum(), 2)
        # Only categories with positive totals appear in the pie.
        if category_sum > 0:
            label_value_hash[category] = category_sum
            values.append(category_sum)
            labels.append(category)
    # Compact "Aug 1, 2020" style labels for the card subtitle.
    start_month = calendar.month_abbr[int(start_date.month)]
    end_month = calendar.month_abbr[int(end_date.month)]
    compact_start_date = f"{start_month} {start_date.day}, {start_date.year}"
    compact_end_date = f"{end_month} {end_date.day}, {end_date.year}"
    pie_chart = go.Figure(
        data=[
            go.Pie(
                values=values,
                labels=labels,
                hoverinfo="percent+label",
                marker=dict(
                    line=dict(color="#fafafa", width=4),
                ),
            )
        ],
        layout=dict(
            showlegend=False,
            margin=dict(b=0, t=0, l=0, r=0),
            piecolorway=[GREEN_COLOR],
        )
    )
    pie_chart.update_traces(textposition="inside", textinfo="percent+label")
    # total spendings
    total_spendings_label = "$" + str(round(sum(values), 2))
    # calculate total income
    total_income_for_period = 0
    if "Income" in label_value_hash:
        total_income_for_period = round(label_value_hash["Income"], 2)
    total_income_for_period = "$" + str(total_income_for_period)
    most_spent_category = "None"
    if len(values) > 0:
        most_spent_category = labels[values.index(max(values))]
    least_spent_category = "None"
    if len(values) > 0:
        least_spent_category = labels[values.index(min(values))]
    text_for_card = [
        html.P(
            f"Between {compact_start_date} and {compact_end_date}",
            className="subtitle_style",
        ),
        html.P(
            [html.Span("Total Spendings:"),
             html.Span(total_spendings_label,
                       className="floatRightStyle")],
        ),
        html.P(
            [html.Span("Total Income:"),
             html.Span(total_income_for_period,
                       className="floatRightStyle")],
        ),
        html.P(
            [html.Span("Most Spent:"),
             html.Span(most_spent_category,
                       className="floatRightStyle")],
        ),
        html.P(
            [html.Span("Least Spent:"),
             html.Span(least_spent_category,
                       className="floatRightStyle")],
        ),
    ]
    return pie_chart, text_for_card
def table_from_dataframe(df):
    """Render the transactions frame as a paginated Dash DataTable.

    Shows only the Date/Category/Amount/Balance columns, ten rows per
    page, with zebra striping on odd rows.
    """
    table_headers = ["Date", "Category", "Amount", "Balance"]
    my_table = dash_table.DataTable(
        id="transaction-table",
        columns=[{"name": i, "id": i} for i in table_headers],
        data=df[table_headers].to_dict("records"),
        page_size=10,
        style_as_list_view=True,
        style_table={
        },
        style_cell={
            # 'whiteSpace': 'normal',
            'height': 'auto',
            "color": "#111",
            "textAlign": "left",
        },
        style_data_conditional=[
            {
                "if": {"row_index": "odd"},
                "backgroundColor": "#eef",
            }
        ],
        style_cell_conditional=[],
        style_header={
            "backgroundColor": GREEN_COLOR,
            "color": PLOT_BGCOLOR
        }
    )
    return my_table
def serve_layout():
    """Build the page layout for the current request.

    Reads the session cookie: unauthenticated visitors get the login page,
    authenticated users get the app shell with their data loaded.  The
    user's transactions, category list, and budget table are serialized
    into hidden divs so the callbacks can read them as State.
    """
    session_cookie = flask.request.cookies.get("custom-auth-session")
    # landing login page
    if session_cookie not in user_names.keys():
        topnav = html.Div(
            html.H1(""),
            className="login-title"
        )
        return new_login(topnav)
    # show the app
    else:
        greeting_and_logout_button = dbc.Row(
            [
                dbc.Col(
                    html.Span(
                        user_names[session_cookie],
                        id="username-greeting",
                    )
                ),
                dbc.Col(
                    dcc.LogoutButton(
                        logout_url="/logout",
                        className="btn btn-outline-dark"
                    ),
                    width="auto",
                )
            ],
            no_gutters=True,
            className="ml-auto flex-nowrap mt-3 mt-md-0",
            align="center",
        )
        topnav = dbc.Navbar([
            dbc.NavbarBrand(
                NAVBAR_TITLE,
                className="ml-2 title-style",
            ),
            dbc.NavbarBrand(
                NAVBAR_SUBTITLE,
                className="ml-2 subtitle_style"
            ),
            greeting_and_logout_button,
        ],
            color="light",
            light=True,
            id="navbar",
            sticky=True,
        )
        # Per-user data: transactions CSV, its category list, and budget.
        user_df = load_dataframe(session_cookie)
        transaction_category_array = list(user_df.Category.unique())
        budget_df = load_budget_dataframe(session_cookie)
        my_table = table_from_dataframe(user_df)
        json_user_df = user_df.to_json(date_format='iso', orient='split')
        json_budget_df = budget_df.to_json(date_format='iso', orient='split')
        return html.Div([
            html.Div(
                topnav,
                id="top-nav",
            ),
            html.Div(
                layout(user_df,
                       transaction_category_array,
                       {},
                       my_table),
                id="app-content",
            ),
            html.Div(
                json_user_df,
                id="hidden-dataframe",
                className="hiddenDiv",
            ),
            html.Div(
                transaction_category_array,
                id="hidden-transaction-category-array",
                className="hiddenDiv",
            ),
            html.Div(
                json_budget_df,
                id="hidden-budget-data",
                className="hiddenDiv",
            ),
        ])
app.layout = serve_layout  # the function itself, so the layout (and auth check) re-runs on each page load
@app.callback(
    Output("modal-backdrop", "is_open"),
    [Input("open-add-budget-modal", "n_clicks"),
     Input("close-add-budget-modal", "n_clicks"),
     Input("save-budget-modal", "n_clicks")],
    [State("modal-backdrop", "is_open")]
)
def toggle_modal_for_add_budget(n1, n2, nSave, is_open):
    """Flip the add-budget modal whenever any of its buttons is clicked."""
    clicked = bool(n1) or bool(n2) or bool(nSave)
    return (not is_open) if clicked else is_open
@app.callback(
    Output("modal-remove-budget-backdrop", "is_open"),
    [Input("open-remove-budget-modal", "n_clicks"),
     Input("close-remove-budget-modal", "n_clicks"),
     Input("save-remove-budget-modal", "n_clicks")],
    [State("modal-remove-budget-backdrop", "is_open")]
)
def toggle_modal_for_remove_budget(n1, n2, nSave, is_open):
    """Flip the remove-budget modal whenever any of its buttons is clicked."""
    clicked = bool(n1) or bool(n2) or bool(nSave)
    return (not is_open) if clicked else is_open
@app.callback(
    [Output("budget-bar-chart", "figure"),
     Output("modal-budget-dropdown", "value"),
     Output("modal-amount-for-budget", "value"),
     Output("remove-budget-category-dropdown", "value"),
     Output("remove-budget-category-dropdown", "options")],
    [Input("save-budget-modal", "n_clicks"),
     Input("save-remove-budget-modal", "n_clicks"),
     Input("budget-month-select", "value")],
    [State("modal-budget-dropdown", "value"),
     State("modal-amount-for-budget", "value"),
     State("remove-budget-category-dropdown", "value"),
     State("hidden-dataframe", "children"),
     State("hidden-budget-data", "children")]
)
def update_budget_bar_chart(save_add_budget_n, save_remove_budget_n,
                            yearMonth, category_to_update,
                            amount_for_update, category_to_delete,
                            jsonified_data, json_budget_df):
    """Redraw the budget bar chart after an add/remove or month change.

    Applies the pending modal edit (add/update takes precedence over
    delete), rebuilds the remove-dropdown options from the resulting
    category set, and clears the modal input values.
    """
    user_df = jsonified_data_to_dataframe(jsonified_data)
    category_budget_hash = budget_dataframe_to_hash(json_budget_df)
    if category_to_update:
        category_budget_hash[category_to_update] = amount_for_update
    elif category_to_delete:
        del category_budget_hash[category_to_delete]
    new_options = [{"label": key, "value": key} for key in category_budget_hash]
    # Trailing empty strings reset the two modals' form fields.
    return [
        budget_bar_chart(user_df, category_budget_hash, yearMonth),
        "", "", "", new_options
    ]
@app.callback(
    Output("budget-card-text", "children"),
    [Input("save-budget-modal", "n_clicks"),
     Input("save-remove-budget-modal", "n_clicks"),
     Input("budget-month-select", "value")],
    [State("hidden-dataframe", "children"),
     State("hidden-budget-data", "children")]
)
def update_budget_card_contents(n1, n2, yearMonth, jsonified_data,
                                json_budget_df):
    """Rebuild the budget-metrics card for the selected "YYYY-MM" month.

    Sums the budgeted and spent amounts over all categories and renders
    totals, remaining balance, and a colored progress bar.
    """
    user_df = jsonified_data_to_dataframe(jsonified_data)
    category_budget_hash = budget_dataframe_to_hash(json_budget_df)
    budget_total = 0
    funds_spent = 0
    for category in category_budget_hash:
        budget_total += category_budget_hash[category]
        funds_spent += sum_of_category_expense_in_month(user_df, category, yearMonth)
    funds_spent = round(funds_spent, 2)
    budget_total = round(budget_total, 2)
    year, month = yearMonth.split("-")
    year = int(year)
    month_abbr = calendar.month_abbr[int(month)]
    balance = budget_total - funds_spent
    balance_color = (GREEN_COLOR if budget_total - funds_spent > 0 else RED_COLOR)
    # Guard against an empty budget: the remove-budget callback can delete
    # every category, which previously raised ZeroDivisionError here.
    if budget_total:
        progress_percent = 100 * (funds_spent / budget_total)
    else:
        progress_percent = 0.0
    # Green up to 90%, amber to 100%, red beyond.
    if progress_percent <= 90:
        progress_color = "success"
    elif progress_percent > 90 and progress_percent < 100:
        progress_color = "warning"
    else:
        progress_color = "danger"
    balance = round(balance, 2)
    progress_percent = round(progress_percent, 1)
    # Format negatives as "-$x.xx" rather than "$-x.xx".
    balance_text = None
    if balance >= 0:
        balance_text = "$" + str(balance)
    else:
        balance_text = "-$" + str(balance)[1:]
    output = [
        html.H4(f"Budget Metrics"),
        html.P(
            f"For {month_abbr}, {year}",
            className="subtitle_style",
        ),
        html.P(
            [html.Span("Budget Total:"),
             html.Span("$" + str(budget_total),
                       className="floatRightStyle")],
        ),
        html.P(
            [html.Span("Funds Spent:"),
             html.Span("$" + str(funds_spent),
                       className="floatRightStyle")],
        ),
        html.P([html.Span("Remaining: "),
                html.Span(
                    balance_text,
                    style={
                        "color": balance_color,
                        "font-weight": 900,
                        "float": "right",
                    }
                )
               ]),
        dbc.Progress(
            str(progress_percent)+"%",
            value=progress_percent,
            color=progress_color,
        )
    ]
    return output
@app.callback(
    [Output("pie-chart", "figure"),
     Output("insights-card-text", "children")],
    [Input("date-picker-for-pie", "start_date"),
     Input("date-picker-for-pie", "end_date")],
    [State("hidden-dataframe", "children"),
     State("hidden-transaction-category-array", "children")]
)
def update_pie_chart_and_insights_card(start_date, end_date, jsonified_data,
                                       transaction_category_array):
    """Rebuild the spending pie chart and the insights card for the
    date range selected in the picker."""
    frame = jsonified_data_to_dataframe(jsonified_data)
    range_start = string_date_to_date_object(start_date)
    range_end = string_date_to_date_object(end_date)
    return pie_chart_and_insights_card(
        frame, range_start, range_end, transaction_category_array)
@app.callback(
    Output("insights-bar-chart", "figure"),
    [Input("date-picker-for-pie", "start_date"),
     Input("date-picker-for-pie", "end_date")],
    [State("hidden-dataframe", "children"),
     State("hidden-transaction-category-array", "children")]
)
def update_insights_bar_chart(start_date, end_date, jsonified_data,
                              transaction_category_array):
    """Rebuild the insights bar chart for the picked date range."""
    frame = jsonified_data_to_dataframe(jsonified_data)
    range_start = string_date_to_date_object(start_date)
    range_end = string_date_to_date_object(end_date)
    return insights_bar_chart(
        frame, range_start, range_end, transaction_category_array)
@app.callback(
    Output("transactions-line-chart", "figure"),
    [Input("date-picker-for-pie", "start_date"),
     Input("date-picker-for-pie", "end_date"),
     Input("checklist-inline-input", "value")],
    [State("hidden-dataframe", "children")]
)
def update_transactions_line_chart(start_date, end_date, checklistInput,
                                   jsonified_data):
    """Redraw the transactions line chart for the picked date range and
    the categories ticked in the checklist."""
    frame = jsonified_data_to_dataframe(jsonified_data)
    range_start = string_date_to_date_object(start_date)
    range_end = string_date_to_date_object(end_date)
    return transactions_line_chart(
        frame,
        range_start,
        range_end,
        checklistInput,
    )
@app.callback(
    Output("collapse", "is_open"),
    [Input("collapse-button", "n_clicks")],
    [State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
    """Toggle the collapsible section each time its button is clicked."""
    return (not is_open) if n else is_open
if __name__ == "__main__":
    # Start the Dash development server (debug mode; not for production).
    app.run_server(debug=True)
| [
"adam@plot.ly"
] | adam@plot.ly |
b61e50e76ad27bc63647d402ed7b18c3b7bc2aae | 9d1701a88644663277342f3a12d9795cd55a259c | /CSC148/07 Sorting/runtime.py | 6d1020dee852cd090d7eccdd33874dd33c64eccf | [] | no_license | xxcocoymlxx/Study-Notes | cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d | c7437d387dc2b9a8039c60d8786373899c2e28bd | refs/heads/master | 2023-01-13T06:09:11.005038 | 2020-05-19T19:37:45 | 2020-05-19T19:37:45 | 252,774,764 | 2 | 0 | null | 2022-12-22T15:29:26 | 2020-04-03T15:44:44 | Jupyter Notebook | UTF-8 | Python | false | false | 3,989 | py | VIDEO:
https://www.youtube.com/watch?v=6Ol2JbwoJp0
NOTES ON THE PDF:
def max_segment_sum(L):
'''(list of int) -> int
Return maximum segment sum of L.
'''
max_so_far = 0
for lower in range(len(L)):
for upper in range(lower, len(L)):
sum = 0
for i in range(lower, upper + 1):
sum = sum + L[i]
max_so_far = max(max_so_far, sum)
return max_so_far
What is the running time of this algorithm? We want an answer in terms of n, not clock time
I want you to find the statement that executes most often; count the number of times that it runs
Statement that runs most often is one in the inner-most loop.
sum = sum + L[i]
Now let's upper-bound the number of times that this statement runs
lower loop runs n times.
Upper loop runs at most n times for each iteration of the lower loop
i loop runs at most n iterations for each iteration of the upper loop.
Now we can upper-bound the total number of times that the inner-most statement runs.
At most n*n*n = n^3
So we have an n^3 algorithm.
More precise: 2+2n^2+n^3 steps
Is it worth it? Or should we just stick to n^3
Prove that 2+2n^2+n^3 is O(n^3).
This means that we have to show 2+2n^2+n^3 is eventually <= kn^3 for some k > 0.
2+2n^2+n^3
<= 2n^3+2n^2+n^3
= 3n^3+2n^2
<= 3n^3+2n^3
= 5n^3
This is our proof that 2+2n^2+n^3 is O(n^3).
----------
We know that the segment-sum code is O(n^3).
Is the code O(n^4) too? Yes
Is it O(n^5)? Yes
Is it O(2^n)? yes
Is it O(n^2)? No
Big oh is an upper bound. If you make it worse (e.g. n^3 to n^4), it's just a worse upper bound. Still technically correct though.
But I want the most accurate bound; lowest upper bound.
----------
I'd like the big oh runtime for the following function.
O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound
def bigoh1(n):
sum = 0
for i in range(100, n):
sum = sum+1
print(sum)
It's O(n). It takes something like n-100 steps, which you can prove is O(n)!
----------
Let's do an ordering of best (fastest) to worst (slowest) algorithm efficiencies:
The best one is O(1). Constant-time algorithm
No matter how big your input, your runtime does not increase.
Example:
def f(n):
print('hello world')
-Return the first element of a list.
-Return the maximum of two characters.
Between constant and linear is O(log n)
Example: binary search
Getting worse...
O(n), linear algorithm.
-Printing all elements in a list
-finding the maximum element in a list
A little bit worse is O(n log n)
Examples: quicksort (on average), mergesort
Slower is O(n^2): bubble sort, insertion sort, selection sort
Slower is O(n^3): maximum segment sum code
Slower is O(n^4), O(n^5)...
...
Eventually you get so bad that you can't even use them in practice
O(2^n). As n increases by 1, you double the amount of time you take
Even worse...
O(n!). Like the permutation approach to finding all anagrams
O(n^n)
Huge difference between O(n^k) polynomials and O(k^n) exponential functions.
O(n^2) and O(2^n): very different.
O(n^2) is computable for reasonable-sized input; O(2^n) is not.
----------
I'd like the big oh runtime for each of these functions.
e.g. O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound
def bigoh1(n):
sum = 0
for i in range(100, n):
sum = sum+1
print(sum)
O(n)
def bigoh2(n):
sum = 0
for i in range(1, n // 2):
sum = sum + 1
for j in range(1, n * n):
sum = sum + 1
print(sum)
First loop is n steps, second is n^2 steps.
n+n^2 = O(n^2)
def bigoh3(n):
sum = 0
if n % 2 == 0:
for j in range(1, n * n):
sum = sum + 1
else:
for k in range(5, n + 1):
sum = sum + k
print(sum)
If n is even, we do n^2 work. If n is odd, we do n work.
Remember that we want the worst-case.
O(n^2)
def bigoh4(m, n):
sum = 0
for i in range(1, n + 1):
for j in range(1, m + 1):
sum = sum + 1
print(sum)
O(n*m)
Not O(n^2). Not O(m^2).
| [
"coco.yang@mail.utoronto.ca"
] | coco.yang@mail.utoronto.ca |
99902ea2cb3fb0f1373b24bf541e6e72fe9a3f51 | cd644d9ce7a135ab2ee5d08e35ca73b511a755a7 | /mylist/urls.py | ac2e42260451c9d8cd7c559d9166a20ecfe81c0d | [] | no_license | Arunthogadiya/Todo-App | 09e08bca4d94ad85f6c6e2bffec126d0ef599e72 | bde30977b2e3db030acd0dfdefda37c08f57063e | refs/heads/main | 2022-12-28T10:14:39.685985 | 2020-10-16T18:10:59 | 2020-10-16T18:10:59 | 304,684,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """mylist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route /admin/ to the Django admin site; delegate every other URL to the
# todolist app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('todolist.urls')),
]
| [
"arun.p@qburst.com"
] | arun.p@qburst.com |
f0145bbece8a0f9e0a268f8609882696ed918bea | 9c74e5acc22fef0ff254023b0c90951ce083f171 | /tests/categorical_ensembling_test.py | 313767f6fce372ce9a5ef394d683123426e85c77 | [
"MIT"
] | permissive | PartTimeHackerman/auto_ml | 067bfbfcf9fe5ca28e310def6d80cefaca6a53bc | e36c77ae443c5427581df837227a6955b1bf3663 | refs/heads/master | 2021-06-25T03:23:43.833193 | 2017-08-16T15:20:41 | 2017-08-16T15:20:41 | 100,502,812 | 0 | 0 | null | 2017-08-16T15:13:44 | 2017-08-16T15:13:44 | null | UTF-8 | Python | false | false | 1,129 | py | import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
import dill
import numpy as np
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_categorical_ensemble_basic_classifier():
    """End-to-end check: train a per-pclass categorical ensemble on the
    Titanic dataset and assert the holdout score falls in a known band."""
    # Seed NumPy so the train/test split and model behaviour are repeatable.
    np.random.seed(0)
    df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
    column_descriptions = {
        'survived': 'output'
        , 'pclass': 'categorical'
        , 'embarked': 'categorical'
        , 'sex': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    # One sub-model is trained per distinct 'pclass' value.
    ml_predictor.train_categorical_ensemble(df_titanic_train, categorical_column='pclass', optimize_final_model=False)
    # score() returns a negative loss here (higher, i.e. closer to 0, is better).
    test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
    print('test_score')
    print(test_score)
    # Small sample sizes mean there's a fair bit of noise here
    assert -0.155 < test_score < -0.135
| [
"ClimbsBytes@gmail.com"
] | ClimbsBytes@gmail.com |
ce73ac5a7f8fe78357fc9d5d06043b38d25f2d9f | 395f1d1d4dbb5816708a346821a984a06d76f815 | /untitled0.py | dfe550d38c43e80f094631f0d543c059557db314 | [] | no_license | maxsorokin9/2 | cc1245de81a797fb4673ba0a4b1be0be3f1feebc | 5fced3478a21997d600dff9f63e3669e980607cc | refs/heads/master | 2022-04-12T15:33:51.970004 | 2020-03-26T09:41:32 | 2020-03-26T09:41:32 | 250,214,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,094 | py | import arcade
import os
SPRITE_SCALING = 0.5        # scale factor applied to the player sprite image
SCREEN_WIDTH = 800          # window width in pixels
SCREEN_HEIGHT = 800         # window height in pixels
SCREEN_TITLE = "Swimming"   # text shown in the window title bar
class Player(arcade.Sprite):
    """Player-controlled sprite that is kept inside the window bounds."""

    def update(self):
        """Move by the current velocity, then clamp to the screen edges."""
        self.center_x += self.change_x
        self.center_y += self.change_y
        # Only one edge per axis is corrected (if/elif), so a sprite wider
        # or taller than the window would favour the left/bottom edge.
        if self.left < 0:
            self.left = 0
        elif self.right > SCREEN_WIDTH - 1:
            self.right = SCREEN_WIDTH - 1
        if self.bottom < 0:
            self.bottom = 0
        elif self.top > SCREEN_HEIGHT - 1:
            self.top = SCREEN_HEIGHT - 1
class MyGame(arcade.Window):
    """Main arcade window: owns the player sprite and handles input."""

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        # Run relative to this file's directory so the sprite image loads.
        file_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(file_path)
        self.player_list = None     # SpriteList holding all drawable sprites
        self.player_sprite = None   # the single controllable Player
        arcade.set_background_color(arcade.color.BLACK)
    def setup(self):
        """Create the sprite list and place the player near the bottom-left."""
        self.player_list = arcade.SpriteList()
        self.player_sprite = Player("EDtWyHPW4AA9hNY.jpg", SPRITE_SCALING)
        self.player_sprite.center_x = 50
        self.player_sprite.center_y = 50
        self.player_list.append(self.player_sprite)
    def on_draw(self):
        """Render all sprites for the current frame."""
        arcade.start_render()
        self.player_list.draw()
    def on_update(self, delta_time):
        """Advance sprite positions once per frame."""
        self.player_list.update()
    def on_key_press(self, key, modifiers):
        """Arrow keys set a constant velocity of 5 px/frame on one axis."""
        if key == arcade.key.UP:
            self.player_sprite.change_y = 5
        elif key == arcade.key.DOWN:
            self.player_sprite.change_y = -5
        elif key == arcade.key.LEFT:
            self.player_sprite.change_x = -5
        elif key == arcade.key.RIGHT:
            self.player_sprite.change_x = 5
    def on_key_release(self, key, modifiers):
        """Releasing an arrow key stops movement on that axis."""
        if key == arcade.key.UP or key == arcade.key.DOWN:
            self.player_sprite.change_y = 0
        elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
            self.player_sprite.change_x = 0
def main():
    """Create the game window, initialise its state, and start the loop."""
    game_window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    game_window.setup()
    arcade.run()
if __name__ == "__main__":
main() | [
"maxsorokin9@gmail.com"
] | maxsorokin9@gmail.com |
6ffe2a06880751514bb23ef6b2258b10f8257c43 | 14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f | /remote_sensing/MODIS_data_test_v3.py | 1f15cb363abab3ce4c3e8caedc88d88198bb5e8d | [] | no_license | tonychangmsu/Python_Scripts | 8ca7bc841c94dcab36743bce190357ac2b1698a5 | 036f498b1fc68953d90aac15f0a5ea2f2f72423b | refs/heads/master | 2016-09-11T14:32:17.133399 | 2016-03-28T16:34:40 | 2016-03-28T16:34:40 | 10,370,475 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,468 | py | #Title: MODIS_data_test.py
#Author: Tony Chang
#Abstract: Test for opening MODIS data and examining the various bands
#Creation Date: 04/14/2015
#Modified Dates: 01/20/2016, 01/26/2016, 01/28/2016, 01/29/2016, 02/01/2016
#local directory : K:\\NASA_data\\scripts
import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir("K:\\NASA_data\\scripts")
import time
import MODIS_acquire as moda
import MODIS_tassel_cap as tas
import MODIS_process as mproc
import tiff_write as tw
#MODIS file name as
# 7 char (product name .)
# 8 char (A YYYYDDD .)
# 6 char (h XX v YY .) #tile index
# 3 char (collection version .) #typically 005
# 14 char (julian date of production YYYYDDDHHMMSS)
if __name__ == "__main__":
start = time.time()
#since we have the date, let's try to get all the data from that date together.
htile = 9
vtile = 4
factor = 0.0001
year = 2000
#we would iterate through the year
begin_year = 2000
end_year = 2015
wd = 'G:\\NASA_remote_data\\MOD09A1'
mod_list, mod_dates = moda.mod_file_search(wd, year, True)
#then iterate through theses list values
scene = 0
mod_data, dnames = moda.mod_acquire_by_file(mod_list[scene]) #this is the full dataset
band_query = 1
#get the files needed
files_to_mosaic = moda.mod_date_dataset_list(wd, mod_dates[scene])
nonproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = False)
reproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = True, method = 0)
#inspect the cloud effects on the nonproj and reproj mosaics
#looks like it comes from band 5! 1230-1250, ,Leaf/Canopy Differences
#not much can be done about that if this is prevalent. In the mean time, we should just implement
#the processing and use the QC to fix the problem
#at this point we would like to transform the data. Then we can apply the reprojection
#need to be careful here, do we reproject before transform or after? before...
transformed = tas.tassel_cap_transform(nonproj_mosaics[:7]) #don't want to include the qc data
#check out the tasseled_cap again. getting some striping for some reason.
tw.tiff_write_gdal(transformed[0], 'K:\\NASA_data\\test\\test_clip.tif')
tw.tiff_write(out, x_size, y_size, cell_size, ymax, xmin, 'K:\\NASA_data\\test\\test_clip.tif')
#tas_array = moda.datasets_to_array(transformed, False)
#find the bounding box by the netCDF from TOPOWX
#GYE coordinates
xmin = -112.39583333837999 #112 23 45
xmax = -108.19583334006 #108 11 45
ymin = 42.279166659379996 #42 16 45
ymax = 46.195833324479999 #46 11 45
aoa = [xmin, xmax, ymin, ymax]
clip = mproc.clip_wgs84_scene(aoa, transformed[0])
#some problems with the reprojection process?
#NO..getting some strange stripe artifacts from the tasselled cap, but could be inherant in the MOD09 data itself...
#all this works now. So now perform this for all the MODIS data and store it in a netCDF4 file that
#is continuous for each year.
#write the file to check it out
tw.tiff_write(clip, np.shape(clip)[1], np.shape(clip)[0], cell_size, ymax, xmin, 'K:\\NASA_data\\test\\', 'test_clip.tif')
#now just write this function for netCDF4
#then save to a netCDF4 file
#then repeat for all the data.
end = time.time()
print('run time :%s'%(end-start)) #takes about 25-30 seconds
'''
mproc.plot_refl(mod_array)
#plot all the reflectances
#see which is faster
import time
start = time.time()
b,g,w = tas.tassel_cap_transform(mod_array)
end = time.time()
mproc.plot_tassel_cap(b,g,w)
'''
| [
"tony.chang@msu.montana.edu"
] | tony.chang@msu.montana.edu |
9c8f2cb092f1eb626c5a5d3c2f242154ab5beb58 | 607c9dab45c75d2168883120e65e5002f3b7ec9e | /article/views.py | 1f624919b81d2e95376b00e866d97a035df31960 | [] | no_license | SaltFishGuy/Django-blog | dee906473f622e72e941bb393a2fa51a2f7ab74d | 6ade95cd0f9040737c4a081af832d5e3ec19f85a | refs/heads/master | 2020-06-02T10:53:22.687508 | 2019-06-10T11:57:44 | 2019-06-10T11:57:44 | 191,132,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,622 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from .forms import *
from django.views.decorators.http import require_POST
from .models import *
from django.shortcuts import get_object_or_404
@login_required(login_url='/account/login/')
@csrf_exempt  # Using this decorator on the view is an alternative way of handling CSRF for POST; previously we used a csrf.js file and {% csrf_token %} in the form.
def article_column(request):
    """List the current user's columns (GET) or create a new one (POST).

    POST replies with plain-text codes consumed by the front-end JS:
    "1" = created, '2' = a column with that name already exists.
    """
    if request.method == "GET":
        columns = ArticleColumn.objects.filter(user=request.user)
        column_form = ArticleColumnForm()
        return render(request, "article/article_column.html", {"columns": columns, 'column_form': column_form})
    if request.method == "POST":
        column_name = request.POST['column']
        # Duplicate check is per-user, not global.
        columns = ArticleColumn.objects.filter(user_id=request.user.id, column=column_name)
        if columns:
            return HttpResponse('2')
        else:
            ArticleColumn.objects.create(user=request.user, column=column_name)
            return HttpResponse("1")
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def rename_article_column(request):
    """Rename the column with the posted id; reply "1" on success, "0" on failure."""
    column_name = request.POST["column_name"]
    column_id = request.POST['column_id']
    try:
        line = ArticleColumn.objects.get(id=column_id)
        line.column = column_name
        line.save()
        return HttpResponse("1")
    # NOTE(review): bare except hides whether the failure was a missing
    # column (DoesNotExist) or something else.
    except:
        return HttpResponse("0")
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article_column(request):
    """Delete the column with the posted id; reply "1" on success, "2" on failure.

    NOTE(review): the failure code here is "2", while rename_article_column
    uses "0" — confirm the front-end JS expects this asymmetry.
    """
    column_id = request.POST["column_id"]
    try:
        line = ArticleColumn.objects.get(id=column_id)
        line.delete()
        return HttpResponse("1")
    except:
        return HttpResponse("2")
import json
@login_required(login_url='/account/login')
@csrf_exempt
def article_post(request):
    """Create a new article (POST) or render the compose form (GET).

    POST replies with plain-text codes for the front-end JS:
    "1" = saved, "2" = save failed (bad column id / tag), "3" = invalid form.
    The posted 'tags' field, when present, is a JSON array of tag names that
    must already exist for this user.
    """
    if request.method == "POST":
        article_post_form = ArticlePostForm(data=request.POST)
        if article_post_form.is_valid():
            # (removed unused local `cd = article_post_form.cleaned_data`)
            try:
                new_article = article_post_form.save(commit=False)
                new_article.author = request.user
                new_article.column = request.user.article_column.get(id=request.POST['column_id'])
                new_article.save()
                tags = request.POST['tags']
                if tags:
                    for atag in json.loads(tags):
                        tag = request.user.tag.get(tag=atag)
                        new_article.article_tag.add(tag)
                return HttpResponse("1")
            except:
                return HttpResponse("2")
        else:
            return HttpResponse("3")
    else:
        article_post_form = ArticlePostForm()
        article_columns = request.user.article_column.all()
        article_tags = request.user.tag.all()
        return render(request, "article/column/article_post.html", {"article_post_form": article_post_form, "article_columns": article_columns, "article_tags": article_tags})
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
@login_required(login_url='/account/login')
def article_list(request):
    """Show the current user's articles, paginated two per page.

    A non-integer ?page= falls back to page 1; an out-of-range page falls
    back to the last page.
    """
    articles_list = ArticlePost.objects.filter(author=request.user)
    paginator = Paginator(articles_list, 2)
    page = request.GET.get('page')
    try:
        current_page = paginator.page(page)
        articles = current_page.object_list
    except PageNotAnInteger:
        current_page = paginator.page(1)
        articles = current_page.object_list
    except EmptyPage:
        current_page = paginator.page(paginator.num_pages)
        articles = current_page.object_list
    return render(request, "article/column/article_list.html", {"articles": articles, "page": current_page})
@login_required(login_url='/account/login')
def article_detail(request, id, slug):
    """Render one article looked up by primary key and slug (404 if missing)."""
    article = get_object_or_404(ArticlePost, id=id, slug=slug)
    return render(
        request,
        "article/column/article_detail.html",
        {"article": article},
    )
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article(request):
    """Delete the article with the posted id; reply "1" on success, "2" on failure.

    NOTE(review): ownership is not checked here — any logged-in user posting
    a valid id can delete the article; confirm whether that is intended.
    """
    article_id = request.POST['article_id']
    try:
        article = ArticlePost.objects.get(id=article_id)
        article.delete()
        return HttpResponse("1")
    except:
        return HttpResponse("2")
@login_required(login_url='/account/login')
@csrf_exempt
def redit_article(request, article_id):
    """Edit an existing article: GET renders the edit form, POST saves changes.

    POST replies "1" on success and "2" on failure (codes consumed by the
    front-end JS).  The 'redit' spelling is kept because the URLconf and
    templates reference this name.
    """
    if request.method == "GET":
        article_columns = request.user.article_column.all()
        article = ArticlePost.objects.get(id=article_id)
        this_article_form = ArticlePostForm(initial={"title": article.title})
        this_article_column = article.column
        return render(request, "article/column/redit_article.html",
                      {"article": article, "article_columns": article_columns, "this_article_column": this_article_column, "this_article_form": this_article_form})
    else:
        # Renamed local (was `redit_article`, which shadowed the view itself).
        article_to_edit = ArticlePost.objects.get(id=article_id)
        try:
            article_to_edit.column = request.user.article_column.get(id=request.POST['column_id'])
            article_to_edit.title = request.POST['title']
            article_to_edit.body = request.POST['body']
            article_to_edit.save()
            return HttpResponse("1")
        except:
            return HttpResponse("2")
@login_required(login_url='/account/login')
@csrf_exempt
def article_tag(request):
    """List the current user's tags (GET) or create a new one (POST).

    POST replies "1" on success; failure replies are free-text messages
    (note the grammar in them is part of the existing front-end contract,
    so they are left unchanged here).
    """
    if request.method == "GET":
        article_tags = ArticleTag.objects.filter(author=request.user)
        article_tag_form = ArticleTagForm()
        return render(request, "article/tag/tag_list.html", {"article_tags": article_tags, "article_tag_form": article_tag_form})
    if request.method == "POST":
        tag_post_form = ArticleTagForm(data=request.POST)
        if tag_post_form.is_valid():
            try:
                new_tag = tag_post_form.save(commit=False)
                new_tag.author = request.user
                new_tag.save()
                return HttpResponse("1")
            except:
                return HttpResponse("the data cannot be save.")
        else:
            return HttpResponse("sorry, the form is not valid.")
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article_tag(request):
    """Delete the tag with the posted id; reply "1" on success, "2" on failure."""
    tag_id = request.POST['tag_id']
    try:
        tag = ArticleTag.objects.get(id=tag_id)
        tag.delete()
        return HttpResponse("1")
    except:
        return HttpResponse("2")
| [
"907799074@qq.com"
] | 907799074@qq.com |
3dd1f2de546b127f150b7cd27a8484e13f04f2e2 | 08d0df51ed31e4ec3d867493b7cff8d6b59a7d3e | /Assignment 5/calculator.py | f215a387307fde94e6b37fbf060338ca02ef3e58 | [] | no_license | hasongtran/Programming-for-Cog-Sci | 0e5e7325ed49bfbc5d1c41008ce7842cdab7cafe | 9f67ca08a19b8f42aba015b24e0822769433fa3e | refs/heads/master | 2021-08-17T06:25:35.711503 | 2017-11-20T21:22:13 | 2017-11-20T21:22:13 | 111,462,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | history = []
def add(a, b):
    """Print and record the sum of a and b; print 'ERROR' on bad operands."""
    try:
        total = float(a) + float(b)
    except TypeError:
        print('ERROR')
        return
    entry = str(a) + ' + ' + str(b) + ' = ' + str(total)
    print(entry)
    history.append(entry)
def sub(a, b):
    """Print and record the difference of a and b; print 'ERROR' on bad operands.

    Bug fix: the recorded string previously showed ' + ' between the
    operands even though the operation is subtraction.
    """
    try:
        answer = float(a) - float(b)
        # Use the subtraction sign (was erroneously ' + ').
        output = str(a) + ' - ' + str(b) + ' = ' + str(answer)
        print(output)
        history.append(output)
    except TypeError:
        print('ERROR')
def mul(a, b):
    """Print and record the product of a and b; print 'ERROR' on bad operands."""
    try:
        product = float(a) * float(b)
    except TypeError:
        print('ERROR')
        return
    entry = str(a) + ' * ' + str(b) + ' = ' + str(product)
    print(entry)
    history.append(entry)
def div(a, b):
    """Print and record the quotient of a and b.

    Prints 'ERROR' for non-numeric operands.  Division by zero prints an
    'UNDEFINED' message instead — note that this case is not appended to
    ``history`` and its formatting omits the spaces used elsewhere.
    """
    try:
        answer = float(a) / float(b)
        output = str(a) + ' / ' + str(b) + ' = ' + str(answer)
        print(output)
        history.append(output)
    except TypeError:
        print('ERROR')
    except ZeroDivisionError:
        print(str(a) + '/' + str(b) + '=' + 'UNDEFINED')
def calc_history():
    """Print every recorded operation, oldest first."""
    # Iterate the list directly instead of indexing via range(len(...)).
    for entry in history:
        print(entry)
def clear_history():
    """Empty the shared history log and print the (now empty) list.

    Bug fix: the previous version bound a new *local* list named
    ``history``, leaving the module-level log untouched.  Mutating the
    list in place clears it for every caller.
    """
    history.clear()
    print(history)
if __name__ == '__main__':
    # Demo run: exercise each operation, then dump the history log.
    add(3, 5)
    mul(4, 6)
    div(10, 2)
    sub(100, 70)
    calc_history()
| [
"noreply@github.com"
] | noreply@github.com |
e3ede7d4acdd774e7b8621e60be2e1b12dc0f0e1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02845/s251805975.py | a8e1b9dedbc87deeb6d7dd5ca8fac2fa7aa26e80 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import sys
readline = sys.stdin.readline  # faster than input() for bulk reads
MOD = 10 ** 9 + 7  # modulus applied to the running answer
INF = float('INF')  # not referenced in this solution; template leftover
sys.setrecursionlimit(10 ** 5)  # template boilerplate; main() is iterative
def main():
    """Count (mod 1e9+7) the ways to assign each of n people to one of
    three groups so that each announced value a[i] equals the size of
    that person's group among the people processed so far.

    Reads n and the list a from stdin; prints the count (0 if no
    assignment is consistent).
    """
    n = int(readline())
    a = list(map(int, readline().split()))
    cnt = [0] * 3   # current size of each of the three groups
    ans = 1
    for x in a:
        # Number of groups whose current size matches the announced value.
        p = cnt.count(x)
        if p == 0:
            # No consistent assignment exists.
            return print(0)
        ans *= p
        ans %= MOD
        # Place this person in the first matching group.
        cnt[cnt.index(x)] += 1
    print(ans)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a71c788c6e3384c8e52aa42ae232c8d33676cace | 102e4b64a0c7bb415235437f1886c7261448bab8 | /SelectiveMemory/QasFeature/HalfCheetah_SMQ_V1.py | 6da255cd764adf55919697173b7dbc1b4e4faba7 | [
"MIT"
] | permissive | ProGamerCode/FitML | 944db5c78567bab69a2470789f06803e6115b873 | 3b44160bbf6c0587b8df198d3ceef10a42e2bfca | refs/heads/master | 2022-05-01T23:18:09.883873 | 2018-02-13T15:13:45 | 2018-02-13T15:13:45 | 121,414,499 | 0 | 0 | MIT | 2022-03-04T08:58:55 | 2018-02-13T17:42:20 | Python | UTF-8 | Python | false | false | 16,568 | py | '''
Half Cheetah with Selective memory and Q as feature
solution by Michel Aka author of FitML github blog and repository
https://github.com/FitMachineLearning/FitML/
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
Update
Flat 4K network
200 random action generated every step (most significant improvement)
Using 60 as feature normalizer
Q as discriminator
'''
import numpy as np
import keras
import gym, pybullet_envs
import pybullet as pb
import os
import h5py
import matplotlib.pyplot as plt
import math
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras import optimizers
num_env_variables = 26   # observation vector width (assumed for this env — confirm)
num_env_actions = 6      # action vector width (joint torques — confirm)
num_initial_observation = 0   # games played with purely random actions first
learning_rate = 0.005    # critic (Q model) Adam learning rate
apLearning_rate = 0.003  # actor (action predictor) Adam learning rate
version_name = "CheetahBulletEnv-SMQ-v15"
weigths_filename = version_name+"-weights.h5"       # critic weights file
apWeights_filename = version_name+"-weights-ap.h5"  # actor weights file
#range within which the SmartCrossEntropy action parameters will deviate from
#remembered optimal policy
sce_range = 0.2
b_discount = 0.985       # reward discount factor
#max_memory_len = 2000000
max_memory_len = 50000
experience_replay_size = 10000
random_every_n = 20      # every n-th game is fully random for exploration
starting_explore_prob = 0.05   # initial epsilon; decays over num_games_to_play
training_epochs = 3
mini_batch = 256
load_previous_weights = True
observe_and_train = True
save_weights = True
save_memory_arrays = True
load_memory_arrays = True
do_training = True
num_games_to_play = 46000
max_steps = 300
#Selective memory settings
sm_normalizer = 40       # scales reward diff into an add-to-memory probability
sm_memory_size = 8400
# One-hot encoding array (identity-like matrix over the action indices).
possible_actions = np.arange(0,num_env_actions)
actions_1_hot = np.zeros((num_env_actions,num_env_actions))
actions_1_hot[np.arange(num_env_actions),possible_actions] = 1
# Create the testing environment and print its spaces for sanity checking.
env = gym.make('HalfCheetahBulletEnv-v0')
env.render(mode="human")
env.reset()
print("-- Observations",env.observation_space)
print("-- actionspace",env.action_space)
# Placeholder training matrices (random rows) used only to fix the input /
# output dimensions of the two Keras models built below.
dataX = np.random.random(( 5,num_env_variables+num_env_actions ))
# Only one output for the total score / reward (critic target).
dataY = np.random.random((5,1))
# Actor: maps a state to an action vector.
apdataX = np.random.random(( 5,num_env_variables ))
apdataY = np.random.random((5,num_env_actions))
def custom_error(y_true, y_pred, Qsa):
    """Q-weighted error: the raw (true - predicted) gap scaled by Qsa.

    Defined but not referenced elsewhere in this script's visible code.
    """
    return 0.001*(y_true - y_pred)*Qsa
# Initialize the reward-predictor (critic / Q) model: one 4096-unit hidden
# layer mapping a concatenated (state, action) vector to a scalar reward.
Qmodel = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
Qmodel.add(Dense(4096, activation='relu', input_dim=dataX.shape[1]))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(256, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(256, activation='relu'))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(dataY.shape[1]))
opt = optimizers.adam(lr=learning_rate)
Qmodel.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
# Initialize the action-predictor (actor) model: same flat 4096-unit shape,
# mapping a state vector to an action vector.
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(4096, activation='relu', input_dim=apdataX.shape[1]))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(256, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(256, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(apdataY.shape[1]))
opt2 = optimizers.adam(lr=apLearning_rate)
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
# Load previous critic weights if a saved file exists in the working dir.
if load_previous_weights:
    dir_path = os.path.realpath(".")
    fn = dir_path + "/"+weigths_filename
    print("filepath ", fn)
    if os.path.isfile(fn):
        print("loading weights")
        Qmodel.load_weights(weigths_filename)
    else:
        print("File ",weigths_filename," does not exis. Retraining... ")
# Load previous actor (action predictor) weights the same way.
if load_previous_weights:
    dir_path = os.path.realpath(".")
    fn = dir_path + "/"+ apWeights_filename
    print("filepath ", fn)
    if os.path.isfile(fn):
        print("loading weights")
        action_predictor_model.load_weights(apWeights_filename)
    else:
        print("File ",apWeights_filename," does not exis. Retraining... ")
# Replay-memory arrays, each seeded with a single zero row and grown during play.
memorySA = np.zeros(shape=(1,num_env_variables+num_env_actions))  # (state, action) rows
memoryS = np.zeros(shape=(1,num_env_variables))  # states
memoryA = np.zeros(shape=(1,1))  # NOTE(review): width 1 despite 6-dim actions — confirm intended use
memoryR = np.zeros(shape=(1,1))  # per-step rewards
memoryRR = np.zeros(shape=(1,1))  # discounted (rolled-up) rewards
if load_memory_arrays:
    # Presence of the SA file is used as the marker that all five exist.
    if os.path.isfile(version_name+'memorySA.npy'):
        print("Memory Files exist. Loading...")
        memorySA = np.load(version_name+'memorySA.npy')
        memoryRR = np.load(version_name+'memoryRR.npy')
        memoryS = np.load(version_name+'memoryS.npy')
        memoryA = np.load(version_name+'memoryA.npy')
        memoryR = np.load(version_name+'memoryR.npy')
    else:
        print("No memory Files. Recreating")
mstats = []        # per-game statistics collected during training
sm_add_counts = 0  # count of transitions accepted into selective memory
def predictTotalRewards(qstate, action):
    """Critic query: estimated discounted future reward for taking
    `action` in state `qstate` (a single Q-model forward pass)."""
    features = np.zeros(shape=(1, num_env_variables + num_env_actions))
    features[0] = np.concatenate((qstate, action), axis=0)
    prediction = Qmodel.predict(features)
    return prediction[0][0]
def GetRememberedOptimalPolicy(qstate):
    """Actor query: the action the policy network currently considers
    best for `qstate` (a single action-predictor forward pass)."""
    state_batch = np.zeros(shape=(1, num_env_variables))
    state_batch[0] = qstate
    prediction = action_predictor_model.predict(state_batch)
    return prediction[0]
def addToMemory(reward,stepReward,memMax,averegeReward,gameAverage):
    """Stochastically decide whether a transition enters selective memory.

    Returns True with a probability that grows with how much `reward`
    exceeds `stepReward`; transitions with diff < 0 are always rejected.
    NOTE(review): `gameFactor` is computed but only used by commented-out
    lines, so it currently has no effect on the outcome.
    """
    #diff = reward - ((averegeReward+memMax)/2)
    diff = reward - stepReward
    gameFactor = ((gameAverage-averegeReward)/math.fabs(memMax-averegeReward) )
    if gameFactor <0:
        gameFactor = 0
    prob = 0.005
    if reward > averegeReward:
        # Above-average rewards: probability scales with diff / sm_normalizer.
        prob = prob + 0.95 * (diff / sm_normalizer)
        #prob = prob * (1+gameFactor*3)
        #prob = prob * (0.1+gameFactor)
        #print("add reward",reward,"diff",diff,"prob",prob,"average", averegeReward,"max",memMax)
    else:
        # Below-average rewards get only a tiny probability bump; any
        # negative diff is rejected outright below.
        prob = prob + 0.005/1000 * (diff / (40+math.fabs(diff)))
    if diff < 0:
        return False
    # Bernoulli draw against the computed probability (non-deterministic).
    if np.random.rand(1)<=prob :
        #print("Adding reward",reward," based on prob ", prob)
        #print("add reward",reward,"diff",diff,"prob",prob,"average","gameFactor",gameFactor, averegeReward,"max",memMax)
        return True
    else:
        return False
# Main observe-and-train loop: play num_games_to_play episodes, grow the
# replay memories, and periodically retrain the critic (Qmodel) and the
# actor (action_predictor_model) on random replay samples.
# NOTE(review): leading indentation was stripped from this copy of the file,
# so the nesting below is inferred from context.
if observe_and_train:
#Play the game 500 times
for game in range(num_games_to_play):
# Per-episode buffers, seeded with one zero row overwritten at step 0.
gameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
gameS = np.zeros(shape=(1,num_env_variables))
gameA = np.zeros(shape=(1,num_env_actions))
gameR = np.zeros(shape=(1,1))
#Get the Q state
#pb.resetSimulation()
qs = env.reset()
#print("qs ", qs)
'''
if game < num_initial_observation:
print("Observing game ", game)
else:
print("Learning & playing game ", game)
'''
# Play one episode (hard cap of 5000 steps; max_steps ends it earlier).
for step in range (5000):
if game < num_initial_observation:
#take a radmon action
a = env.action_space.sample()
else:
# Exploration probability decays linearly over the games.
prob = np.random.rand(1)
explore_prob = starting_explore_prob-(starting_explore_prob/num_games_to_play)*game
#Chose between prediction and chance
if prob < explore_prob or game%random_every_n==0:
#take a random action
a = env.action_space.sample()
else:
#Get Remembered optiomal policy
remembered_optimal_policy = GetRememberedOptimalPolicy(qs)
# Sample 200 random actions and keep the one the critic rates best.
stock = np.zeros(200)
stockAction = np.zeros(shape=(200,num_env_actions))
for i in range(200):
stockAction[i] = env.action_space.sample()
stock[i] = predictTotalRewards(qs,stockAction[i])
best_index = np.argmax(stock)
randaction = stockAction[best_index]
#Compare R for SmartCrossEntropy action with remembered_optimal_policy and select the best
#if predictTotalRewards(qs,remembered_optimal_policy) > utility_possible_actions[best_sce_i]:
if predictTotalRewards(qs,remembered_optimal_policy) > predictTotalRewards(qs,randaction):
a = remembered_optimal_policy
#print(" | selecting remembered_optimal_policy ",a)
else:
a = randaction
#print(" - selecting generated optimal policy ",a)
env.render()
qs_a = np.concatenate((qs,a), axis=0)
#get the target state and reward
s,r,done,info = env.step(a)
#record only the first x number of states
if step ==0:
gameSA[0] = qs_a
gameS[0] = qs
gameR[0] = np.array([r])
# NOTE(review): this stores the reward r into the action buffer while the
# else-branch below stores the action a -- likely should be np.array([a]).
gameA[0] = np.array([r])
else:
gameSA= np.vstack((gameSA, qs_a))
gameS= np.vstack((gameS, qs))
gameR = np.vstack((gameR, np.array([r])))
gameA = np.vstack((gameA, np.array([a])))
if step > max_steps:
done = True
# Episode over: back-propagate discounted rewards, then update the memories.
if done :
tempGameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
tempGameS = np.zeros(shape=(1,num_env_variables))
tempGameA = np.zeros(shape=(1,num_env_actions))
tempGameR = np.zeros(shape=(1,1))
tempGameRR = np.zeros(shape=(1,1))
#Calculate Q values from end to start of game
#mstats.append(step)
for i in range(0,gameR.shape[0]):
#print("Updating total_reward at game epoch ",(gameY.shape[0]-1) - i)
if i==0:
#print("reward at the last step ",gameY[(gameY.shape[0]-1)-i][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]
else:
#print("local error before Bellman", gameY[(gameY.shape[0]-1)-i][0],"Next error ", gameY[(gameY.shape[0]-1)-i+1][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]+b_discount*gameR[(gameR.shape[0]-1)-i+1][0]
#print("reward at step",i,"away from the end is",gameY[(gameY.shape[0]-1)-i][0])
if i==gameR.shape[0]-1 and game%5==0:
print("Training Game #",game,"last everage",memoryR[:-1000].mean(),"game mean",gameR.mean(),"memoryR",memoryR.shape[0], "SelectiveMem Size ",memoryRR.shape[0],"Selective Mem mean",memoryRR.mean(axis=0)[0],"previous sm_add_counts",sm_add_counts, " steps = ", step ,"last reward", r," finished with headscore ", gameR[(gameR.shape[0]-1)-i][0])
# First ever game: the memories are still the single seed row, replace them.
if memoryR.shape[0] ==1:
memorySA = gameSA
memoryR = gameR
memoryA = gameA
memoryS = gameS
memoryRR = gameR
# Drop the zero seed rows before accumulating this game's samples.
tempGameA = tempGameA[1:]
tempGameS = tempGameS[1:]
tempGameRR = tempGameRR[1:]
tempGameR = tempGameR[1:]
tempGameSA = tempGameSA[1:]
for i in range(gameR.shape[0]):
tempGameSA = np.vstack((tempGameSA,gameSA[i]))
tempGameR = np.vstack((tempGameR,gameR[i]))
#Add experience to memory
#memorySA = np.concatenate((memorySA,gameSA),axis=0)
#memoryR = np.concatenate((memoryR,gameR),axis=0)
sm_add_counts = 0
#print("memoryR average", memoryR.mean(axis=0)[0])
# Selective-memory pass: keep only samples that beat the critic's prediction.
for i in range(0,gameR.shape[0]):
pr = predictTotalRewards(gameS[i],gameA[i])
# if you did better than expected then add to memory
#if game > 3 and addToMemory(gameR[i][0], pr ,memoryRR.max(),memoryR.mean(axis=0)[0],gameR.mean(axis=0)[0]):
if game > 3 and addToMemory(gameR[i][0], pr ,memoryRR.max(),memoryR.mean(axis=0)[0],gameR.mean(axis=0)[0]):
sm_add_counts+=1
tempGameA = np.vstack((tempGameA,gameA[i]))
tempGameS = np.vstack((tempGameS,gameS[i]))
tempGameRR = np.vstack((tempGameRR,gameR[i]))
if memoryR.shape[0] ==1:
memoryA = tempGameA
memoryS = tempGameS
memoryRR = tempGameRR
memoryR = tempGameR
memorySA = tempGameSA
else:
#Add experience to memory
memoryS = np.concatenate((memoryS,tempGameS),axis=0)
memoryRR = np.concatenate((memoryRR,tempGameRR),axis=0)
memoryA = np.concatenate((memoryA,tempGameA),axis=0)
memorySA = np.concatenate((memorySA,tempGameSA),axis=0)
memoryR = np.concatenate((memoryR,tempGameR),axis=0)
#if memory is full remove first element
# NOTE(review): np.alen is deprecated and removed in modern NumPy releases;
# len() would be the replacement.
if np.alen(memoryR) >= max_memory_len:
memorySA = memorySA[gameR.shape[0]:]
memoryR = memoryR[gameR.shape[0]:]
if np.alen(memoryA) >= sm_memory_size:
memoryA = memoryA[int(sm_memory_size/10):]
memoryS = memoryS[int(sm_memory_size/10):]
memoryRR = memoryRR[int(sm_memory_size/10):]
#Update the states
qs=s
#Retrain every X failures after num_initial_observation
# Periodic retraining of critic and actor on random experience-replay samples.
if done and game >= num_initial_observation and do_training and game >= 5:
if game%10 == 0:
if game%25 == 0:
print("Training game# ", game,"momory size", memorySA.shape[0])
tSA = (memorySA)
tR = (memoryR)
tX = (memoryS)
tY = (memoryA)
#sw = (memoryAdv)
train_Q = np.random.randint(tR.shape[0],size=experience_replay_size)
train_A = np.random.randint(tY.shape[0],size=int(experience_replay_size/3))
tX = tX[train_A,:]
tY = tY[train_A,:]
#sw = sw[train_idx,:]
tR = tR[train_Q,:]
tSA = tSA[train_Q,:]
#training Reward predictor model
Qmodel.fit(tSA,tR, batch_size=mini_batch,epochs=training_epochs,verbose=0)
#training action predictor model
action_predictor_model.fit(tX,tY, batch_size=mini_batch, epochs=training_epochs,verbose=0)
# Periodic checkpointing of weights and memory arrays.
if done and game >= num_initial_observation:
if save_weights and game%20 == 0 and game >35:
#Save model
#print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
if save_memory_arrays and game%20 == 0 and game >35:
np.save(version_name+'memorySA.npy',memorySA)
np.save(version_name+'memoryRR.npy',memoryRR)
np.save(version_name+'memoryS.npy',memoryS)
np.save(version_name+'memoryA.npy',memoryA)
np.save(version_name+'memoryR.npy',memoryR)
if done:
'''
#Game won conditions
if step > 197:
print("Game ", game," WON *** " )
else:
print("Game ",game," ended with positive reward ")
#Game ended - Break
'''
break
# Plot the collected per-game statistics, then save the final weights.
plt.plot(mstats)
plt.show()
if save_weights:
#Save model
print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
| [
"noreply@github.com"
] | noreply@github.com |
5489ca8b0d85c922b29d4ecdf85dbb5710f999e4 | 1341f16b53f6630b52096ecdb2e9e21b2deff1e8 | /A2Part2.py | d2341f41e99ec4a096ddf4f188e1cbc132d608cd | [] | no_license | edufire/ASMA | 2209d42780008e43f7c4efc93d281b5058c6e44c | 79071efbcd427f394342a7c6c6f00f765d5a930f | refs/heads/master | 2020-05-23T08:14:30.206578 | 2016-11-08T19:29:40 | 2016-11-08T19:29:40 | 70,271,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | import numpy as np
"""
A2-Part-2: Generate a complex sinusoid
Write a function to generate the complex sinusoid that is used in DFT computation of length N (samples),
corresponding to the frequency index k. Note that the complex sinusoid used in DFT computation has a
negative sign in the exponential function.
The amplitude of such a complex sinusoid is 1, the length is N, and the frequency in radians is 2*pi*k/N.
The input arguments to the function are two positive integers, k and N, such that k < N-1.
The function should return cSine, a numpy array of the complex sinusoid.
EXAMPLE: If you run your function using N=5 and k=1, the function should return the following numpy array cSine:
array([ 1.0 + 0.j, 0.30901699 - 0.95105652j, -0.80901699 - 0.58778525j, -0.80901699 + 0.58778525j,
0.30901699 + 0.95105652j])
"""
def genComplexSine(k, N):
    """
    Inputs:
        k (integer) = frequency index of the complex sinusoid of the DFT
        N (integer) = length of complex sinusoid in samples
    Output:
        The function should return a numpy array
        cSine (numpy array) = The generated complex sinusoid (length N)
    """
    # Sample indices 0..N-1 as floats so that n*k/N is always true division
    # (guards against integer-array floor division under legacy NumPy /
    # Python 2 semantics; values are unchanged on Python 3).
    n = np.arange(N, dtype=float)
    # DFT analysis sinusoid: e^(-j*2*pi*k*n/N) -- note the negative exponent.
    return np.exp(-1j*2*np.pi*n*k/N)
| [
"noreply@github.com"
] | noreply@github.com |
491947913e74f4724ae182463fead075a1427afc | c356e123abdc3260244328004ea4ffd00155def6 | /test_3.py | 035ded0e6a2ef065c46caf1e3f9c4d51a1ef4cb4 | [] | no_license | choice4/WNN-WN | 83cdcebcc17afb71778850c99ca7ce0193ef0e9e | a8dd80d69aed9ea4ef3b691c7e0b11b192931a4d | refs/heads/master | 2021-08-14T10:33:49.297253 | 2014-06-23T00:01:57 | 2014-06-23T00:01:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | __author__ = 'martslaaf'
import numpy as np
from matplotlib.pyplot import plot, show
from wavenet import wavelon_class_constructor, trainer
# Load the three input signals and the target signal (one float per line).
# NOTE(review): Python 2 code (xrange, print statements, list-returning map)
# with hard-coded absolute data paths.
inp_1 = map(lambda x: float(x), open('/home/martslaaf/Pictures/old_data/nonlinear_xor_1.csv').readlines())
inp_2 = map(lambda x: float(x), open('/home/martslaaf/Pictures/old_data/nonlinear_xor_2.csv').readlines())
inp_3 = map(lambda x: float(x), open('/home/martslaaf/Pictures/old_data/nonlinear_sum.csv').readlines())
outp = map(lambda x: float(x), open('/home/martslaaf/Pictures/old_data/nonlinear_target.csv').readlines())
# Split the 1000 samples: first 750 for training, last 250 for validation.
coun = 1000
tr = []
va = []
for i in xrange(coun-250):
tr.append((np.array([inp_1[i], inp_2[i], inp_3[i]]), np.array([outp[i]])))
for i in xrange(coun-250, coun):
va.append((np.array([inp_1[i], inp_2[i], inp_3[i]]), np.array([outp[i]])))
# Build a 3-input / 1-output wavelet network with 19 wavelons.
n = wavelon_class_constructor(frame=(-200, 200), period=100)
n = n(3, 1, 19)
k = 0
# Train the network; 10000 is passed straight to wavenet.trainer (its exact
# meaning is defined there).
track = trainer(10000, tr, va, n)
outew = []
print track
# Collect the network's output on the validation set and plot it (red)
# against the target signal (green).
for j in va:
outew.append(n.forward(j[0])[0][0])
plot(outp[coun-250: coun], 'g')
plot(outew, 'r')
show() | [
"mart.slaaf@gmail.com"
] | mart.slaaf@gmail.com |
79419f0a0da2a49550aaafc59411b25fe4318638 | dfb751689d07e708b3c2b023844202f7bf600cef | /utils/file_utils.py | c88d3d81b6709db2522e36fb952d3085c136bff5 | [] | no_license | bewakes/nepali-nlp | 75ff3e21ca953d4e3ed82eee0798b808853e53a7 | 8f04e8ae775239dfa5bf8060652ec84c78d54333 | refs/heads/master | 2023-04-18T15:56:00.495830 | 2021-05-04T08:10:47 | 2021-05-04T08:10:47 | 353,330,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import os
from typing import List
def get_files_recursively(directory: str) -> List[str]:
    """Return the paths of all regular files under `directory`, descending
    into every subdirectory recursively.

    Entries are visited in os.listdir order; anything that is not a regular
    file is treated as a directory and recursed into.
    """
    collected: List[str] = []
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        if os.path.isfile(full_path):
            collected.append(full_path)
        else:
            # Non-file entry: recurse and splice in its files.
            collected.extend(get_files_recursively(full_path))
    return collected
| [
"bewakepandey@gmail.com"
] | bewakepandey@gmail.com |
6c88d27d3b37ee3630d08d1654d8b7b2c1a7f640 | dce7ca1ebab403bf7c23b77368ee26a2dd4475b6 | /tests/test_cos.py | cd57475224ee19e74c5d9fa421f172e8a7f9fb4b | [] | no_license | qcymkxyc/Graduate | 3b7e89b3f44141d9fd011c15690f902674a9e979 | 2afedacaaa3a0f4d9bbc13596d967ec8808d43d6 | refs/heads/master | 2022-12-10T12:32:37.326653 | 2018-11-10T07:49:13 | 2018-11-10T07:49:16 | 148,103,320 | 0 | 0 | null | 2022-12-08T01:14:09 | 2018-09-10T05:25:40 | Python | UTF-8 | Python | false | false | 317 | py | import unittest
from app.util import cos
class COSTestCase(unittest.TestCase):
"""
Tencent Cloud (COS) tests.
"""
def test_cos_upload(self):
"""
Tencent Cloud COS upload test: uploads a small binary payload under
the key 'login_success.txt' (presumably requires live COS credentials
configured for app.util.cos -- confirm before running).
"""
cos.upload_binary_file(b"abcde","login_success.txt")
if __name__ == '__main__':
# Allow running this test module directly via the unittest runner.
unittest.main()
| [
"qcymkxyc@163.com"
] | qcymkxyc@163.com |
092a0ae392c057bc745c1f605e8e6f13ef2924b6 | e2ef3afe3850ae34f531d9f38991e554bdc1ea4d | /github_messages/tasks.py | 1ce2175244adff2ea0c0b131f90b8dd00746094a | [] | no_license | ggarcia24/github_webhook | 6fa00a4d7200f1f3fe9751bcd61e1ec19599e742 | 6fd03a569a6965ac42a0fb2d38043b005c8c2179 | refs/heads/master | 2021-08-24T12:06:23.029673 | 2017-12-09T17:55:48 | 2017-12-09T17:56:01 | 113,678,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,024 | py | import fnmatch
import json
from celery.schedules import crontab
from celery.task import PeriodicTask
from celery.utils.log import get_task_logger
from django.conf import settings
from github import Github, UnknownObjectException
from .models import WebhookTransaction, PullRequestTransactionResult, RepositoryFilePattern
# Module-level PyGithub client, authenticated once at import time with the
# token and base URL taken from Django settings.
_github_client = Github(login_or_token=settings.GITHUB_TOKEN, base_url=settings.GITHUB_API_URL)
class UserNotAllowedException(Exception):
"""Internal control-flow exception: a protected file pattern matched but
the event sender is not in that pattern's authorized list."""
pass
class ProcessMessages(PeriodicTask):
"""Celery periodic task that processes queued GitHub webhook transactions.

Each unprocessed WebhookTransaction is parsed as a pull-request event,
checked against the configured protected file patterns, and the outcome is
stored as a PullRequestTransactionResult.

NOTE(review): indentation was stripped from this copy of the file, so the
nesting of the method bodies below is inferred from context.
"""
run_every = crontab() # this will run once a minute
logger = get_task_logger(__name__)
def run(self, **kwargs):
"""Celery entry point: process every unprocessed transaction, marking
each PROCESSED on success or ERROR on any exception."""
# Obtain all the transactions from the DB
unprocessed_trans = self.get_transactions_to_process()
for trans in unprocessed_trans:
try:
self.process_trans(trans)
trans.status = WebhookTransaction.PROCESSED
trans.save()
except Exception as e:
# Broad catch so one bad transaction cannot stall the whole batch.
self.logger.error(str(e))
trans.status = WebhookTransaction.ERROR
trans.save()
@staticmethod
def get_transactions_to_process():
"""Return the queryset of transactions still waiting to be processed."""
return WebhookTransaction.objects.filter(
status=WebhookTransaction.UNPROCESSED
)
def process_trans(self, transaction):
"""Validate one pull-request webhook payload against protected patterns.

Fetches the PR from GitHub, walks its changed files, and if a file
matches a protected pattern whose authorized list does not include the
event sender, posts a warning comment on the PR. Always stores a
PullRequestTransactionResult row describing the outcome.
"""
# Here we should do the magic
# Obtain the json from the request
body = transaction.body
pr_info = json.loads(body)
action = pr_info['action']
pr_number = pr_info['number']
pr_owner = pr_info['pull_request']['user']['login']
pr_sender = pr_info['sender']['login']
pr_repo = pr_info['repository']['full_name']
pr_org, repo_short_name = pr_repo.split('/')
try:
organization = _github_client.get_organization(pr_org)
repository = organization.get_repo(repo_short_name)
except UnknownObjectException:
# If pr_org is not "fetchable" then that means it's a user repository!
try:
username = _github_client.get_user(pr_org)
repository = username.get_repo(repo_short_name)
except Exception as e:
raise e
pull_request = repository.get_pull(pr_number)
changed_files = pull_request.get_files()
str_changed_files = ''
# Get all the protected patterns
protected_patterns = RepositoryFilePattern.objects.all()
# We have not yet found a file that violates the protected_patterns list
found = False
allowed = False
# For all the files changed find out if one breaks the rule
for file in changed_files:
str_changed_files += file.filename + "\n"
try:
for pattern in protected_patterns:
if fnmatch.fnmatch(file.filename, pattern.pattern):
found = True
if pattern.authorized.filter(name=pr_sender).exists():
allowed = True
else:
# If we found out that user hasn't been allowed, the stop immediately
allowed = False
raise UserNotAllowedException
except UserNotAllowedException:
break
# Publish a comment to the PR stating that the PR Should not be merged
# NOTE(review): `pattern` below is whatever pattern the loop last examined;
# that is only guaranteed to be the violating one when the loop exited via
# UserNotAllowedException.
if found and not allowed:
pull_request.create_comment(
"You are not allowed to change a file extension of type '{}'".format(pattern.pattern))
# Store the transaction result
result = PullRequestTransactionResult(
webhook_transaction=transaction,
action=action,
number=int(pr_number),
owner=pr_owner,
sender=pr_sender,
repository=repository.full_name,
changed_files=str_changed_files,
permission_violation=(found and not allowed)
)
result.save()
| [
"gonzalogarcia243@gmail.com"
] | gonzalogarcia243@gmail.com |
072c6f86614407a9f19390e8d587f7145de910b7 | e77b842a09ed035a04f4ce5b304426c0b64b6685 | /tests.py | f6fa14c97bbd822585238be645f9dc041786b657 | [
"MIT"
] | permissive | kyduff/DICE-models | cb38e4bd88d9c20ae3a131534479a96f7e18f11f | 03117c088ef79373d96d334c405e1be8668d0f5a | refs/heads/master | 2023-05-02T16:10:48.728227 | 2021-05-19T18:59:11 | 2021-05-19T18:59:11 | 361,970,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #!/usr/bin/env python
import timeit
import numpy as np
from specs import ModelSpec
import simulator
def time_test():
    """Benchmark simulator.objective on a default ModelSpec.

    Returns the (number_of_loops, total_time) pair produced by
    timeit.Timer.autorange().

    Fix: the control-vector length is now derived from N instead of N being a
    dead local alongside hard-coded 100s (the value is unchanged: one leading
    scalar followed by two length-N sequences, all 0.5).
    """
    N = 100
    test_input = np.array([0.5] * (2 * N + 1))
    mspec = ModelSpec()
    return timeit.Timer(lambda: simulator.objective(test_input, mspec)).autorange()
if __name__ == '__main__':
# Run the benchmark; the per-loop time is reported below in milliseconds.
nloops, ttl_time = time_test()
print(f"time per loop: {1e3*ttl_time/nloops} ms") | [
"eaglemankyle@gmail.com"
] | eaglemankyle@gmail.com |
a8bcb9815507277dea6806401e1cd7f1e5c101bd | 21122067487a8147ac20f638d528d9538676aa2c | /mul of sparse matrix.py | ccf198e324d79fd405458edf37ca9d81f8874253 | [] | no_license | puraharreddy/dsp-lab | 6f476aafb80af0c2cd849c121b17a967afa5862b | 8b7bc5ac77f3e2ecb9109e126b75f4b4be46a19a | refs/heads/master | 2023-02-01T21:11:29.017911 | 2020-12-15T04:47:11 | 2020-12-15T04:47:11 | 290,115,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 21:22:04 2020
@author: sonyy
"""
#purahar reddy-22 , b-13
#Program to multiply sparse matrix
def sparse_matrix(a):
    """Convert a dense matrix `a` (list of row lists) to sparse triplet form.

    Returns a list of [row, col, value] entries, one per non-zero element,
    in row-major order. Assumes a rectangular matrix (every row as long as
    the first one).
    """
    n_rows = len(a)
    n_cols = len(a[0])
    # Keep only the non-zero cells, preserving row-major order.
    return [[r, c, a[r][c]]
            for r in range(n_rows)
            for c in range(n_cols)
            if a[r][c] != 0]
#multiplication of two sparse matrices
def mul(a1, a2):
    """Multiply two matrices given in sparse triplet form.

    Each argument is a list of [row, col, value] triplets (the format that
    sparse_matrix produces, and exactly what the script below passes in as
    sm1/sm2). Returns the product, also as [row, col, value] triplets in
    row-major order with zero entries dropped.

    Bug fixed: the previous implementation multiplied the triplet lists
    themselves as if they were dense matrices, which produced wrong results
    (or an IndexError) whenever a matrix had more than three non-zero cells.
    """
    # Group a2's entries by row so each a1 entry finds its partners in O(1).
    rows2 = {}
    for i, j, v in a2:
        rows2.setdefault(i, []).append((j, v))
    # Accumulate products: (i,k) in a1 pairs with (k,j) in a2.
    acc = {}
    for i, k, v1 in a1:
        for j, v2 in rows2.get(k, []):
            acc[(i, j)] = acc.get((i, j), 0) + v1 * v2
    # Emit in row-major order, keeping the result sparse.
    return [[i, j, val] for (i, j), val in sorted(acc.items()) if val != 0]
# printing of martix
def display(a):
    """Print each row of `a` on its own line, values separated by spaces.

    An empty list prints the placeholder 'EMPTY MATRIX' instead of rows.
    """
    if a == []:
        print('EMPTY MATRIX')
    for entries in a:
        print(' '.join(str(value) for value in entries))
# Function to take array input
def input_matrix(row):
    """Read `row` lines of whitespace-separated integers from stdin and
    return them as a list of int lists (one inner list per line)."""
    matrix = []
    for _ in range(row):
        # Each prompt-less input() line becomes one matrix row.
        matrix.append(list(map(int, input().split())))
    return matrix
# Inputting arrays
row1 = int(input("Enter the number of rows in first matrix : "))
col1 = int(input("Enter the number of columns in first matrix : "))
row2 = int(input("Enter the number of rows in second matrix : "))
col2 = int(input("Enter the number of columns in second matrix : "))
# Matrix product is only defined when columns(A) == rows(B).
if col1!=row2:
print('You cannot multiply these matrices')
exit()
print("Enter Martix 1")
a1 = input_matrix(row1)
print("Enter Martix 2")
a2 = input_matrix(row2)
# Printing Original Matrices
print("The Original Matrices are")
print("Matrix 1")
display(a1)
print("Matrix 2")
display(a2)
print()
# Printing Sparse Matrices
print("The Sparse Matrices are")
sm1 = sparse_matrix(a1)
sm2 = sparse_matrix(a2)
print("Sparse Matrix 1")
display(sm1)
print("Sparse Matrix 2")
display(sm2)
print()
# Printing the result
# NOTE(review): mul is given the sparse triplet lists sm1/sm2; confirm its
# implementation expects that format.
print("Multiplication of 2 Sparse Matrices")
result = mul(sm1,sm2)
display(result) | [
"noreply@github.com"
] | noreply@github.com |
6291a6042041500296cbde2708740f0bf984e374 | 0bb3bc8eea74d316377bb1f88a8600162d83d98a | /test_demo/dianping_food_top100.py | ddf32f2ecd1973f9a3ea2ec62336876b0d284b9a | [] | no_license | WangYongjun1990/spider | 10a1f03c26a083b8a1b5e25a9180f69d50994d73 | f13d756790a19d1465624f6c8b1f0ecb87870f51 | refs/heads/master | 2020-03-08T09:16:08.748865 | 2018-04-16T01:54:26 | 2018-04-16T01:54:26 | 128,042,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # -*- coding:utf-8 -*-
"""
File Name: `test_dianping_top100`.py
Version:
Description: 爬取南京评价最高的100家餐厅信息,对应网页 http://www.dianping.com/shoplist/search/5_10_0_score
Author: wangyongjun
Date: 2018/4/13 11:45
"""
import requests
# Shared HTTP headers: spoof a desktop Chrome user agent (presumably to avoid
# the site rejecting scripted requests).
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
def dianping_food_top100():
"""Fetch and print Dianping's top-rated restaurants for cityId=5 (Nanjing,
per the module docstring).

Python 2 code (print statements). Prints each shop's name, three rating
scores and average price taken from the 'shopBeans' list of the JSON
response.
"""
url = 'http://www.dianping.com/mylist/ajax/shoprank?cityId=5&shopType=10&rankType=score&categoryId=0'
try:
r = requests.get(url, headers=headers, timeout=10, proxies=None, verify=False)
# print r.text
except Exception as e:
# NOTE(review): the error is only printed; if the request failed, `r` is
# unbound and the r.json() call below raises NameError.
print e
shop_list = r.json().get('shopBeans')
print shop_list
print type(shop_list), len(shop_list)
for shop_dict in shop_list:
print shop_dict['shopName'], shop_dict['score1'], shop_dict['score2'], shop_dict['score3'], shop_dict['avgPrice']
if __name__ == "__main__":
dianping_food_top100() | [
"yongjun.wang@mi-me.com"
] | yongjun.wang@mi-me.com |
16bb47f355f41366748376cc038a1bb95047c919 | 3b7921faee071629d3cc961f7a14cdc2af02c1df | /leecode/code/238/238.py | 7d52a9edbc2e0bd8227bc9f5c6176ac36a48a00a | [] | no_license | Xiaoboshi/Brush-Problem | 6626c503bdc4924d9a2c7d161e4a40907d415458 | 8bb5f46566aed5deace8d438e41e0b1142b26bb3 | refs/heads/master | 2023-02-26T19:57:40.578981 | 2021-02-02T07:57:02 | 2021-02-02T07:57:02 | 213,524,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
    """LeetCode 238: return a list whose entry i is the product of every
    element of nums except nums[i], without using division.

    O(n) time; the output list is the only extra storage.

    Fixes: removed the unused scratch list `rec2` and the no-op
    `rec[n-1] *= temp` (temp was still 1 there), and guarded against empty
    input, which previously raised IndexError.
    """
    n = len(nums)
    if n == 0:
        return []
    rec = [1] * n
    # Prefix pass: rec[i] = product of nums[0..i-1].
    for i in range(1, n):
        rec[i] = nums[i - 1] * rec[i - 1]
    # Suffix pass: fold in the product of nums[i+1..n-1].
    temp = 1
    for i in range(n - 2, -1, -1):
        temp = nums[i + 1] * temp
        rec[i] *= temp
    return rec
"35205994+Xiaoboshi@users.noreply.github.com"
] | 35205994+Xiaoboshi@users.noreply.github.com |
8eff0f0a7ccda0cc6e4779d87cd907c9f72549f8 | f04fb8bb48e38f14a25f1efec4d30be20d62388c | /哈希表/204. 计数质数.py | 2bd3e79467b7525a3d7e1a7e82f4074be703fff9 | [] | no_license | SimmonsChen/LeetCode | d8ef5a8e29f770da1e97d295d7123780dd37e914 | 690b685048c8e89d26047b6bc48b5f9af7d59cbb | refs/heads/master | 2023-09-03T01:16:52.828520 | 2021-11-19T06:37:19 | 2021-11-19T06:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | """
统计所有小于非负整数 n 的质数的数量。
示例 1:
输入:n = 10
输出:4
解释:小于 10 的质数一共有 4 个, 它们是 2, 3, 5, 7 。
"""
from math import sqrt
class Solution(object):
    # LeetCode 204: count the primes strictly below n, i.e. in [2, n).
    def countPrimes(self, n):
        """
        :type n: int
        :rtype: int
        """
        if n < 2:
            return 0
        # Sieve of Eratosthenes: sieve[i] stays True while i may be prime.
        sieve = [True] * n
        sieve[0] = False
        total = 0
        for candidate in range(2, n):
            if not sieve[candidate]:
                continue
            total += 1
            # Cross out multiples starting at candidate**2; smaller multiples
            # were already marked by smaller primes.
            for multiple in range(candidate * candidate, n, candidate):
                sieve[multiple] = False
        return total
if __name__ == '__main__':
# Quick smoke test: primes below 10 are 2, 3, 5, 7 -> prints 4.
s = Solution()
print(s.countPrimes(10))
| [
"15097686925@163.com"
] | 15097686925@163.com |
46feca2c0d907b3ec236df10ab5ca16b6d0a071e | 3b648cd035b7fa5891a272b316507754596b0e77 | /code/reddit-analyzer.py | ac7222633f9cbd89f3e83512c9c8b9621b97ba0e | [] | no_license | andrewjoliver/PrezApprovalRatingsReddit | 9e514881805d6718189b01a8c22e4d8d2183feb6 | d5bc4090006ff9b231358788cf757ea1d74927c9 | refs/heads/master | 2020-06-12T03:58:16.150134 | 2019-06-28T01:52:53 | 2019-06-28T01:52:53 | 194,188,233 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | import csv
import re
from textblob import TextBlob
import time
import numpy
def clean(comment):
    """Strip @mentions, URLs and punctuation from a comment and collapse all
    whitespace, returning a single space-separated string.
    """
    # Raw string fixes the invalid escape sequences ("\w", "\/") the original
    # non-raw pattern relied on (a DeprecationWarning since Python 3.6, a
    # SyntaxWarning in 3.12); the matched text is identical.
    stripped = re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", comment)
    return ' '.join(stripped.split())
def analyze(file_loc, write_loc, first):
    """Run TextBlob sentiment analysis over a reddit-comment CSV.

    Reads rows (expects 'body', 'created_utc' and 'score' columns) from
    `file_loc` and appends one "date,sentiment,polarity_score,comment_score,"
    line per comment to `write_loc`. When `first` is True a CSV header line
    is written before the data rows. A UnicodeDecodeError while iterating the
    input aborts the remaining rows with a console message (original
    best-effort behaviour, kept).

    Fix: the output file is now opened once for the whole run instead of
    being re-opened for every row; the explicit close() calls that sat inside
    `with` blocks (no-ops) were removed.
    """
    with open(write_loc, 'a') as out:
        if first:
            out.write("date,sentiment,polarity_score,comment_score\n")
        with open(file_loc) as csvfile:
            reader = csv.DictReader(csvfile)
            try:
                for row in reader:
                    analysis = TextBlob(clean(row['body']))
                    date = time.strftime('%m-%Y', time.localtime(int(row['created_utc'])))
                    polarity = analysis.sentiment.polarity
                    if polarity > 0:
                        sentiment = 'positive'
                    elif polarity == 0:
                        sentiment = 'neutral'
                    else:
                        sentiment = 'negative'
                    # Trailing comma before the newline matches the original format.
                    out.write(date + ',' + sentiment + ',' + str(polarity) + "," + row['score'] + "," + "\n")
            except UnicodeDecodeError:
                print("Unicode failure.")
def monthly_rating(file_loc, write_loc, year, dates):
"""Aggregate per-comment polarity scores by month and append the averages.

Reads the analyzer output CSV at `file_loc` (columns: date, sentiment,
polarity_score, comment_score) and, for every 'MM-YYYY' label in `dates`,
collects each matching row's polarity_score repeated comment_score times,
then delegates to calc_avg to append 'label,mean' lines to `write_loc`.

NOTE(review): the `year` parameter is unused and the bare list
comprehension below has no effect. Indentation was stripped from this
copy, so whether the file is re-opened once per date or read a single
time cannot be confirmed here.
"""
res = dict()
for date in dates:
res[date] = list()
for date in dates:
# Dead statement: builds and discards a list of empty lists.
[list() for y in range(len(dates))]
with open(file_loc) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
loc = str(row['date'])
if loc == date:
# Weight each comment's polarity by its (integer) comment score.
for y in range(int(row['comment_score'])):
res[date].append(float(row['polarity_score']))
# for key in res:
# print(str(key) + " : " + str(res[key]))
calc_avg(res, write_loc)
def calc_avg(vals, write_loc):
    """Append one 'key,mean' line per entry of `vals` to `write_loc`.

    `vals` maps a label (e.g. an 'MM-YYYY' date string) to a list of numeric
    scores; the mean of each list is computed with numpy.average.

    Fix: the output file is opened once for the whole dictionary instead of
    being re-opened for every key.
    """
    with open(write_loc, 'a') as out:
        for key in vals:
            avg_score = numpy.average(numpy.asarray(vals[key]))
            out.write(str(key) + "," + str(avg_score) + "\n")
def main():
"""Drive the pipeline: run sentiment analysis over each subreddit dump.

The placeholder paths ('REAPLCE WITH FILE PATH') must be filled in before
use; the monthly-aggregation stage is currently commented out.
"""
file_loc_main = "REAPLCE WITH FILE PATH/"
write_loc_main = "REAPLCE WITH FILE PATH"
# Years of data available for each subreddit dump.
republican_years = ["2010", "2011", "2012", "2013", "2014"]
democrat_years = ["2011", "2012", "2013", "2014"]
politics_years = ["2008", "2009", "2010", "2011", "2012", "2013", "2014"]
months = ["01-", "02-", "03-", "04-", "05-", "06-", "07-", "08-", "09-", "10-", "11-", "12-"]
directories = {"obama-democrats/": democrat_years, "obama-politics/": politics_years, "obama-republicans/": republican_years}
for element in directories:
file_extension = element
years = directories[element]
file_loc = file_loc_main + file_extension + "/"
first = True
for year in years:
# NOTE(review): file_loc keeps accumulating "<year>.csv" suffixes across
# iterations instead of being rebuilt from the base path each year (the
# reset lives in the commented-out code below), and `first` is never set
# to False, so the CSV header is re-written for every year.
file_loc = file_loc + year + ".csv"
write_loc = write_loc_main + file_extension + "output.csv"
analyze(file_loc, write_loc, first)
# file_loc = write_loc
# write_loc = write_loc_main + file_extension + "monthly-results.csv"
# dates = list()
# for month in months:
# for year in years:
# dates.append(month + year)
# monthly_rating(file_loc, write_loc, year, dates)
#
# file_loc = file_loc_main + file_extension + "/"
# first = False
if __name__ == '__main__':
main()
| [
"ajo14@duke.edu"
] | ajo14@duke.edu |
d20be627a406e2379a3cd53a20a70ac4b5852db4 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/284-tideGauge.py | 255f5e1573a5a697bd3fef71c7b6f3022772b778 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
# Input: per-tide-gauge predictor CSVs; output: one combined CSV per gauge.
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
"""Merge the per-predictor CSVs (slp, wnd_u, wnd_v) of one tide-gauge
folder into a single date-aligned CSV written to dir_out.

The folder index is hard-coded via x/y below (this copy handles gauge 284
only, matching the script's file name). Predictor columns are prefixed
with the predictor name before merging so the combined frame is
unambiguous.

NOTE(review): indentation was stripped from this copy, so nesting is
inferred. The function relies on os.chdir and therefore mutates the
process working directory.
"""
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 284
y = 285
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
328d2e6ecbb728b5834653aefb0dd9fa449653ed | 69445bf0d5bcaf970a1255dfea5fd80824f37486 | /kolory.py | 37d29f9e55160b9705398d5d73c9cb3939825999 | [] | no_license | jedrzejasgard/wersje_kolorystyczne_produktu-IDVendo | e4ddeb968bbc035dd46b2614f2e6b1b1286ddb51 | ba168286065bc0eb0babf018636dd3eec0bb1687 | refs/heads/master | 2023-03-07T03:08:45.492549 | 2021-02-23T12:13:51 | 2021-02-23T12:13:51 | 341,543,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | import json
from vendoasg.vendoasg import Vendo
import configparser
config = configparser.ConfigParser()
config.read('vendo.ini')
# Connect to the Vendo database/API using the credentials from vendo.ini.
vendoApi = Vendo(config.get('vendo','vendo_API_port'))
vendoApi.logInApi(config.get('vendo','logInApi_user'),config.get('vendo','logInApi_pass'))
vendoApi.loginUser(config.get('vendo','loginUser_user'),config.get('vendo','loginUser_pass'))
# Product IDs whose colour variants should be resolved.
produkty = [
'19661',
'19672',
'19654',
'19633',
'19634'
]
# For every product: read its JSON export, then for each colour in its
# 'lista_kolorow' (colour list) look up the '<product>-<colour>' item code in
# Vendo and append the resolved item ID to kolory.txt.
with open(f"kolory.txt", 'a') as plik_kolory:
for produkt in produkty:
plikjson = r'V:/indesign_baza/jsonFiles/'+str(produkt)+'.json'
with open(plikjson) as f:
data = list(json.load(f).items())
lista_kolorow = (data[0][1]['lista_kolorow'])
for kolor in lista_kolorow:
kod = f'{produkt}-{kolor}'
kod_query = vendoApi.getJson ('/Magazyn/Towary/Towar', {"Token":vendoApi.USER_TOKEN,"Model":{"Towar":{"Kod":kod}}})
#print(f"kod - {kod_query}")
try:
numerID = kod_query["Wynik"]["Towar"]["ID"]
print(f'{kod}-->{numerID}')
plik_kolory.write(str(numerID) + "\n")
except:
# Bare except: any lookup failure just dumps the raw response.
print(kod_query)
| [
"j.pawlewski@asgard.gifts"
] | j.pawlewski@asgard.gifts |
c6ae34b2b23ff9afcccd235018498cdb235efb99 | 6f0e74cdc81f78ffc5dbc1b2db1cef8cbec950c4 | /aws_interface/cloud/logic/delete_function_test.py | 7a62e2c7c9241aa10726b393c1fa616aa7aa066f | [
"Apache-2.0"
] | permissive | hubaimaster/aws-interface | 125b3a362582b004a16ccd5743d7bdff69777db5 | 5823a4b45ffb3f7b59567057855ef7b5c4c4308d | refs/heads/master | 2023-01-19T15:43:38.352149 | 2023-01-12T01:38:00 | 2023-01-12T01:38:00 | 149,847,881 | 57 | 10 | Apache-2.0 | 2023-01-12T01:39:49 | 2018-09-22T05:17:43 | JavaScript | UTF-8 | Python | false | false | 742 | py |
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
# input_format:  'test_name' -- name of the stored function test(s) to delete.
# output_format: 'success'   -- whether the deletion succeeded.
info = {
'input_format': {
'test_name': 'str',
},
'output_format': {
'success': 'bool',
}
}
@NeedPermission(Permission.Run.Logic.delete_function_test)
def do(data, resource):
    """Delete every stored logic-function-test item matching params['test_name'].

    Queries the 'logic-function-test' partition for items whose test_name
    equals the requested one and deletes each match.

    Returns {'success': bool}: True only if every matched item was deleted
    (trivially True when nothing matched).

    Fix: previously `success` was assigned only inside the deletion loop, so
    a query with no matches raised NameError, and with multiple matches only
    the last item's delete status was reported.
    """
    partition = 'logic-function-test'
    body = {}
    params = data['params']
    test_name = params.get('test_name')
    items, _ = resource.db_query(partition, [{'option': None, 'field': 'test_name', 'value': test_name, 'condition': 'eq'}])
    success = True
    for item in items:
        # One failed delete marks the whole operation unsuccessful.
        success = resource.db_delete_item(item['id']) and success
    body['success'] = success
    return body
| [
"hubaimaster@gmail.com"
] | hubaimaster@gmail.com |
5833e03ed33a8ec7549369840b1fa07513ad8d85 | 4cb40963ebc95a9e4cdd5725ac4ae882594a363d | /tests/influence/_core/test_tracin_self_influence.py | 0f327ce3fbc6230024bf4d2190c00f2750105f8c | [
"BSD-3-Clause"
] | permissive | NarineK/captum-1 | 59592277aed8c97dd8effed4af953676381d50c8 | a08883f1ba3abc96ace06b11883893419b187d09 | refs/heads/master | 2022-12-23T22:39:50.502939 | 2022-08-01T16:30:43 | 2022-08-01T16:30:43 | 215,140,394 | 1 | 0 | null | 2019-10-14T20:36:19 | 2019-10-14T20:36:19 | null | UTF-8 | Python | false | false | 5,906 | py | import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInSelfInfluence(BaseTest):
    """Tests for TracIn ``self_influence``.

    Verifies that self-influence scores agree with the diagonal of the full
    train-on-train influence matrix, and that the scores are invariant to
    how the training data is fed in (single batch vs. DataLoader, and both
    checkpoint loop orders).  Each case is parameterized over loss
    reduction mode, TracIn implementation, and packed/unpacked inputs.
    """

    @parameterized.expand(
        [
            (reduction, constructor, unpack_inputs)
            for unpack_inputs in [True, False]
            for (reduction, constructor) in [
                ("none", DataInfluenceConstructor(TracInCP)),
                (
                    "sum",
                    DataInfluenceConstructor(
                        TracInCP,
                        name="TracInCPFastRandProjTests",
                        sample_wise_grads_per_batch=True,
                    ),
                ),
                ("sum", DataInfluenceConstructor(TracInCPFast)),
                ("mean", DataInfluenceConstructor(TracInCPFast)),
            ]
        ],
        name_func=build_test_name_func(),
    )
    def test_tracin_self_influence(
        self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
    ) -> None:
        """Self-influence must equal the diagonal of the influence matrix,
        regardless of ``outer_loop_by_checkpoints``."""
        with tempfile.TemporaryDirectory() as tmpdir:
            (
                net,
                train_dataset,
            ) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
            # compute tracin_scores of training data on training data
            criterion = nn.MSELoss(reduction=reduction)
            batch_size = 5
            tracin = tracin_constructor(
                net,
                train_dataset,
                tmpdir,
                batch_size,
                criterion,
            )
            # calculate influence scores, using the training data as the test batch
            train_scores = tracin.influence(
                train_dataset.samples,
                train_dataset.labels,
                k=None,
                unpack_inputs=unpack_inputs,
            )
            # calculate self_tracin_scores
            self_tracin_scores = tracin.self_influence(
                DataLoader(train_dataset, batch_size=batch_size),
                outer_loop_by_checkpoints=False,
            )
            # check that self_tracin scores equals the diagonal of influence scores
            assertTensorAlmostEqual(
                self,
                torch.diagonal(train_scores),
                self_tracin_scores,
                delta=0.01,
                mode="max",
            )
            # check that setting `outer_loop_by_checkpoints=False` and
            # `outer_loop_by_checkpoints=True` gives the same self influence scores
            self_tracin_scores_by_checkpoints = tracin.self_influence(
                DataLoader(train_dataset, batch_size=batch_size),
                outer_loop_by_checkpoints=True,
            )
            assertTensorAlmostEqual(
                self,
                self_tracin_scores_by_checkpoints,
                self_tracin_scores,
                delta=0.01,
                mode="max",
            )

    @parameterized.expand(
        [
            (reduction, constructor, unpack_inputs)
            for unpack_inputs in [True, False]
            for (reduction, constructor) in [
                ("none", DataInfluenceConstructor(TracInCP)),
                (
                    "sum",
                    DataInfluenceConstructor(
                        TracInCP,
                        sample_wise_grads_per_batch=True,
                    ),
                ),
                ("sum", DataInfluenceConstructor(TracInCPFast)),
                ("mean", DataInfluenceConstructor(TracInCPFast)),
            ]
        ],
        name_func=build_test_name_func(),
    )
    def test_tracin_self_influence_dataloader_vs_single_batch(
        self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
    ) -> None:
        """Feeding one collated batch vs. a DataLoader of smaller batches
        must produce (numerically) the same self-influence scores."""
        # tests that the result of calling the public method `self_influence` for a
        # DataLoader of batches is the same as when the batches are collated into a
        # single batch
        with tempfile.TemporaryDirectory() as tmpdir:
            (
                net,
                train_dataset,
            ) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
            # create a single batch representing the entire dataset
            single_batch = next(
                iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
            )
            # create a dataloader that yields batches from the dataset
            dataloader = DataLoader(train_dataset, batch_size=5)
            # create tracin instance
            criterion = nn.MSELoss(reduction=reduction)
            batch_size = 5
            tracin = tracin_constructor(
                net,
                train_dataset,
                tmpdir,
                batch_size,
                criterion,
            )
            # compute self influence using `self_influence` when passing in a single
            # batch
            single_batch_self_influence = tracin.self_influence(single_batch)
            # compute self influence using `self_influence` when passing in a
            # dataloader with the same examples
            dataloader_self_influence = tracin.self_influence(dataloader)
            # the two self influences should be equal
            assertTensorAlmostEqual(
                self,
                single_batch_self_influence,
                dataloader_self_influence,
                delta=0.01,  # due to numerical issues, we can't set this to 0.0
                mode="max",
            )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
5a330fdb062c17741917dde0a09cea4036d78dfb | 73420826d939e43646345967e0b37f2a57ee8280 | /learning_log/settings.py | ea588cd022ac42063cf062359035ed7c51fa80b6 | [] | no_license | BearNeverWA/learning_log | 8a34263e02e36f76291b83edeebfa9a3e85e8936 | 19f5ef5990775acb62cc912d31da8800731ebf48 | refs/heads/master | 2021-08-22T21:18:32.284435 | 2017-12-01T09:42:46 | 2017-12-01T09:42:46 | 112,708,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,201 | py | """
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; before any public
# deployment it should be rotated and loaded from the environment instead.
SECRET_KEY = 'pi+a*+%p46^^mp-da^+f7kgvsr2n9mx5$d*ag7x=)du%q%7^fh'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine for local development; production hostnames must be added.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # My Application
    'learning_logs',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'learning_log.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory (in addition to per-app templates).
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'learning_log.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file in the project root; suitable for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
| [
"jinyx@rc.inesa.com"
] | jinyx@rc.inesa.com |
5996b9b454b741b90c6e2a148bc67beef53d21a0 | dc836c5435b90dcca981a2f6d4c385293b6ef8f5 | /Murder+Mystery (1).py | 8622decca2b4847819ed0d62be02a4885031c8df | [] | no_license | caitydaisy/Murder-Mystery | 56026a906c76b9e845b0288bfeb7819f27c9924c | 0352e81ddc976178a9724fa09ca3faf1b5e15807 | refs/heads/master | 2020-03-28T08:33:33.775885 | 2018-09-08T22:17:45 | 2018-09-08T22:17:45 | 147,973,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,069 | py |
# coding: utf-8
# # Preamble: A Brand new Jay
#
# After an eventful season on season 8 of *A Brand New Jay*, the 3 remaining contestants were invited to Jay Stacksby's private island for the last three episodes. When the day of filming the finale came Mr. Stacksby was found with one of his Professional Series 8-inch Chef Knives plunged through his heart! After the initial investigation highlighted that the film crew all lived in a separate house on the other side of the island, it was concluded that only the three contestants were near enough to Stacksby in order to commit a crime. At the scene of the crime, a letter was left. Here are the contents of that letter:
#
# > You may call me heartless, a killer, a monster, a murderer, but I'm still NOTHING compared to the villian that Jay was. This whole contest was a sham, an elaborate plot to shame the contestants and feed Jay's massive, massive ego. SURE you think you know him! You've seen him smiling for the cameras, laughing, joking, telling stories, waving his money around like a prop but off camera he was a sinister beast, a cruel cruel taskmaster, he treated all of us like slaves, like cattle, like animals! Do you remember Lindsay, she was the first to go, he called her such horrible things that she cried all night, keeping up all up, crying, crying, and more crying, he broke her with his words. I miss my former cast members, all of them very much. And we had to live with him, live in his home, live in his power, deal with his crazy demands. AND FOR WHAT! DID YOU KNOW THAT THE PRIZE ISN'T REAL? He never intended to marry one of us! The carrot on the stick was gone, all that was left was stick, he told us last night that we were all a terrible terrible disappointment and none of us would ever amount to anything, and that regardless of who won the contest he would never speak to any of us again! It's definitely the things like this you can feel in your gut how wrong he is! Well I showed him, he got what he deserved all right, I showed him, I showed him the person I am! I wasn't going to be pushed around any longer, and I wasn't going to let him go on pretending that he was some saint when all he was was a sick sick twisted man who deserved every bit of what he got. The fans need to know, Jay Stacksby is a vile amalgamation of all things evil and bad and the world is a better place without him.
#
# Pretty sinister stuff! Luckily, in addition to this bold-faced admission, we have the introduction letters of the three contestants. Maybe there is a way to use this information to determine who the author of this murder letter is?
#
# Myrtle Beech's introduction letter:
# > Salutations. My name? Myrtle. Myrtle Beech. I am a woman of simple tastes. I enjoy reading, thinking, and doing my taxes. I entered this competition because I want a serious relationship. I want a commitment. The last man I dated was too whimsical. He wanted to go on dates that had no plan. No end goal. Sometimes we would just end up wandering the streets after dinner. He called it a "walk". A "walk" with no destination. Can you imagine? I like every action I take to have a measurable effect. When I see a movie, I like to walk away with insights that I did not have before. When I take a bike ride, there better be a worthy destination at the end of the bike path. Jay seems frivolous at times. This worries me. However, it is my staunch belief that one does not make and keep money without having a modicum of discipline. As such, I am hopeful. I will now list three things I cannot live without. Water. Emery boards. Dogs. Thank you for the opportunity to introduce myself. I look forward to the competition.
#
# Lily Trebuchet's introduction letter:
# > Hi, I'm Lily Trebuchet from East Egg, Long Island. I love cats, hiking, and curling up under a warm blanket with a book. So they gave this little questionnaire to use for our bios so lets get started. What are some of my least favorite household chores? Dishes, oh yes it's definitely the dishes, I just hate doing them, don't you? Who is your favorite actor and why? Hmm, that's a hard one, but I think recently I'll have to go with Michael B. Jordan, every bit of that man is handsome, HANDSOME! Do you remember seeing him shirtless? I can't believe what he does for the cameras! Okay okay next question, what is your perfect date? Well it starts with a nice dinner at a delicious but small restaurant, you know like one of those places where the owner is in the back and comes out to talk to you and ask you how your meal was. My favorite form of art? Another hard one, but I think I'll have to go with music, music you can feel in your whole body and it is electrifying and best of all, you can dance to it! Okay final question, let's see, What are three things you cannot live without? Well first off, my beautiful, beautiful cat Jerry, he is my heart and spirit animal. Second is pasta, definitely pasta, and the third I think is my family, I love all of them very much and they support me in everything I do. I know Jay Stacksby is a handsome man and all of us want to be the first to walk down the aisle with him, but I think he might truly be the one for me. Okay that's it for the bio, I hope you have fun watching the show!
#
# Gregg T Fishy's introduction letter:
#
# > A most good day to you all, I am Gregg T Fishy, of the Fishy Enterprise fortune. I am 37 years young, an adventurous spirit and I've never lost my sense of childlike wonder. I do love to be in the backyard gardening and I have the most extraordinary time when I'm fishing. Fishing for what, you might find yourself asking? Why, I happen to always be fishing for compliments of course! I have a stunning pair of radiant blue eyes that will pierce the soul of anyone who dare gaze upon my countenance. I quite enjoy going on long jaunts through garden paths and short walks through greenhouses. I hope that Jay will be as absolutely interesting as he appears on the television, I find that he has some of the most curious tastes in style and humor. When I'm out and about I quite enjoy hearing tales that instill in my heart of hearts the fascination that beguiles my every day life, every fiber of my being scintillates and vascillates with extreme pleasure during one of these charming anecdotes and significantly pleases my beautiful personage. I cannot wait to enjoy being on the television program A Jay To Remember, it certainly seems like a grand time to explore life and love.
# ## Saving The Different Examples as Variables
#
# First let's create variables to hold the text data in! Save the muder note as a string in a variable called `murder_note`. Save Lily Trebuchet's introduction into `lily_trebuchet_intro`. Save Myrtle Beech's introduction into `myrtle_beech_intro`. Save Gregg T Fishy's introduction into `gregg_t_fishy_intro`.
# In[18]:
murder_note = "You may call me heartless, a killer, a monster, a murderer, but I'm still NOTHING compared to the villian that Jay was. This whole contest was a sham, an elaborate plot to shame the contestants and feed Jay's massive, massive ego. SURE you think you know him! You've seen him smiling for the cameras, laughing, joking, telling stories, waving his money around like a prop but off camera he was a sinister beast, a cruel cruel taskmaster, he treated all of us like slaves, like cattle, like animals! Do you remember Lindsay, she was the first to go, he called her such horrible things that she cried all night, keeping up all up, crying, crying, and more crying, he broke her with his words. I miss my former cast members, all of them very much. And we had to live with him, live in his home, live in his power, deal with his crazy demands. AND FOR WHAT?! DID YOU KNOW THAT THE PRIZE ISN'T REAL? He never intended to marry one of us! The carrot on the stick was gone, all that was left was stick, he told us last night that we were all a terrible terrible disappointment and none of us would ever amount to anything, and that regardless of who won the contest he would never speak to any of us again! It's definitely the things like this you can feel in your gut how wrong he is! Well I showed him, he got what he deserved all right, I showed him, I showed him the person I am! I wasn't going to be pushed around any longer, and I wasn't going to let him go on pretending that he was some saint when all he was was a sick sick twisted man who deserved every bit of what he got. The fans need to know, Jay Stacksby is a vile amalgamation of all things evil and bad and the world is a better place without him."
lily_trebuchet_intro = "Hi, I'm Lily Trebuchet from East Egg, Long Island. I love cats, hiking, and curling up under a warm blanket with a book. So they gave this little questionnaire to use for our bios so lets get started. What are some of my least favorite household chores? Dishes, oh yes it's definitely the dishes, I just hate doing them, don't you? Who is your favorite actor and why? Hmm, that's a hard one, but I think recently I'll have to go with Michael B. Jordan, every bit of that man is handsome, HANDSOME! Do you remember seeing him shirtless? I can't believe what he does for the cameras! Okay okay next question, what is your perfect date? Well it starts with a nice dinner at a delicious but small restaurant, you know like one of those places where the owner is in the back and comes out to talk to you and ask you how your meal was. My favorite form of art? Another hard one, but I think I'll have to go with music, music you can feel in your whole body and it is electrifying and best of all, you can dance to it! Okay final question, let's see, What are three things you cannot live without? Well first off, my beautiful, beautiful cat Jerry, he is my heart and spirit animal. Second is pasta, definitely pasta, and the third I think is my family, I love all of them very much and they support me in everything I do. I know Jay Stacksby is a handsome man and all of us want to be the first to walk down the aisle with him, but I think he might truly be the one for me. Okay that's it for the bio, I hope you have fun watching the show!"
myrtle_beech_intro = "Salutations. My name? Myrtle. Myrtle Beech. I am a woman of simple tastes. I enjoy reading, thinking, and doing my taxes. I entered this competition because I want a serious relationship. I want a commitment. The last man I dated was too whimsical. He wanted to go on dates that had no plan. No end goal. Sometimes we would just end up wandering the streets after dinner. He called it a \"walk\". A \"walk\" with no destination. Can you imagine? I like every action I take to have a measurable effect. When I see a movie, I like to walk away with insights that I did not have before. When I take a bike ride, there better be a worthy destination at the end of the bike path. Jay seems frivolous at times. This worries me. However, it is my staunch belief that one does not make and keep money without having a modicum of discipline. As such, I am hopeful. I will now list three things I cannot live without. Water. Emery boards. Dogs. Thank you for the opportunity to introduce myself. I look forward to the competition."
gregg_t_fishy_intro = "A most good day to you all, I am Gregg T. Fishy, of the Fishy Enterprise fortune. I am 37 years young, an adventurous spirit and I've never lost my sense of childlike wonder. I do love to be in the backyard gardening and I have the most extraordinary time when I'm fishing. Fishing for what, you might find yourself asking? Why, I happen to always be fishing for compliments of course! I have a stunning pair of radiant blue eyes that will pierce the soul of anyone who dare gaze upon my countenance. I quite enjoy going on long jaunts through garden paths and short walks through greenhouses. I hope that Jay will be as absolutely interesting as he appears on the television, I find that he has some of the most curious tastes in style and humor. When I'm out and about I quite enjoy hearing tales that instill in my heart of hearts the fascination that beguiles my every day life, every fiber of my being scintillates and vascillates with extreme pleasure during one of these charming anecdotes and significantly pleases my beautiful personage. I cannot wait to enjoy being on the television program A Jay To Remember, it certainly seems like a grand time to explore life and love."
# ## The First Indicator: Sentence Length
#
# Perhaps some meaningful data can first be gleaned from these text examples if we measure how long the average sentence length is. Different authors have different patterns of written speech, so this could be very useful in tracking down the killer.
#
# Write a function `get_average_sentence_length` that takes some `text` as an argument. This function should return the average length of a sentence in the text.
#
# Hint (highlight this hint in order to reveal it):
# <font color="white">Use your knowledge of _string methods_ to create a list of all of the sentences in a text, called **sentences_in_text**.
# Further break up each **sentences_in_text** into a list of words and save the _length_ of that list of words to a new list that contains all the sentence lengths, called **sentence_lengths**. Take the average of all of the sentence lengths by adding them all together and dividing by the number of sentences (which should be the same as the length of the **sentence_lengths**).
#
# Remember sentences can end with more than one kind of punctuation, you might find it easiest to use **.replace()** so you only have to split on one punctuation mark. Remember **.replace()** doesn't modify the string itself, it returns a new string!</font>
# In[19]:
def get_average_sentence_length(text):
    """Return the average number of words per sentence in ``text``.

    Sentences are terminated by '.', '!' or '?'.  Empty fragments produced
    by consecutive terminators (e.g. "?!") or a trailing period are ignored.
    Returns 0 for text containing no sentences.
    """
    # Normalise every terminator to '.' so we only have to split once.
    normalized = text.replace("!", ".").replace("?", ".")
    sentences = [s for s in normalized.split(".") if s.strip()]
    if not sentences:
        # Avoid ZeroDivisionError on empty / punctuation-only input.
        return 0
    # Per the exercise spec, sentence length is measured in *words*;
    # the original counted characters (len of the raw sentence string).
    word_counts = [len(s.split()) for s in sentences]
    return sum(word_counts) / len(word_counts)
# Notebook cell echo: displays Gregg's average sentence length when run
# interactively (the return value is discarded when run as a script).
get_average_sentence_length(gregg_t_fishy_intro)
# ## Creating The Definition for Our Model
#
# Now that we have a metric we want to save and data that is coupled with that metric, it might be time to create our data type. Let's define a class called `TextSample` with a constructor. The constructor should take two arguments: `text` and `author`. `text` should be saved as `self.raw_text`. Call `get_average_sentence_length` with the raw text and save it to `self.average_sentence_length`. You should save the author of the text as `self.author`.
#
# Additionally, define a string representation for the model. If you print a `TextSample` it should render:
# - The author's name
# - The average sentence length
#
# This will be your main class for the problem at hand. All later instruction to update `TextSample` should be done in the code block below. After updating `TextSample`, click on the `Cell` option in the Jupyter Notebook main menu above, then click `Run All` to rerun the cells from top to bottom. If you need to restart your Jupyter Notebook either run the cells below first or move the `TextSample` class definition & instantiation cells to the bottom.
# In[20]:
class TextSample:
    """Bundle of stylometric features computed from one piece of text.

    Attributes:
        text: the raw, untouched sample.
        author: name of the sample's author (or "Murderer's Note").
        average_sentence_length: mean sentence length of the raw text.
        prepared_text: lowercased, punctuation-free, in-order word list.
        ngram_frequency: in-order list of two-word n-grams of the sample.
            Kept as a *list* (not a table) because ``find_text_similarity``
            tabulates it itself.
        word_count_frequency: word -> occurrence-count table.
    """

    def __init__(self, text, author):
        self.text = text
        self.author = author
        self.average_sentence_length = get_average_sentence_length(text)
        # Prepare the text once and derive the other features from it,
        # instead of re-running prepare_text() three separate times.
        self.prepared_text = prepare_text(text)
        self.ngram_frequency = ngram_creator(self.prepared_text)
        self.word_count_frequency = build_frequency_table(self.prepared_text)

    def __repr__(self):
        # "Tthe" typo fixed; round() with no ndigits already yields an int,
        # so no str() conversion is needed for format().
        return "The average sentence length of the text written by {} is {}".format(
            self.author, round(self.average_sentence_length)
        )
# ### Creating our TextSample Instances
#
# Now create a `TextSample` object for each of the samples of text that we have.
# - `murderer_sample` for the murderer's note.
# - `lily_sample` for Lily Trebuchet's note.
# - `myrtle_sample` for Myrtle Beech's note.
# - `gregg_sample` for Gregg T Fishy's note.
#
# Print out each one after instantiating them.
# In[21]:
# Build a TextSample for each note so all stylometric features are
# computed up front; printing shows each author's average sentence length.
murderer_sample = TextSample(murder_note, "Murderer's Note")
print(murderer_sample)
lily_sample = TextSample(lily_trebuchet_intro, "Lily Trebuchet")
print(lily_sample)
myrtle_sample = TextSample(myrtle_beech_intro, "Myrtle Beech")
print(myrtle_sample)
gregg_sample = TextSample(gregg_t_fishy_intro, "Gregg T.Fishy")
print(gregg_sample)
# ## Cleaning Our Data
#
# We want to compare the word choice and usage between the samples, but sentences make our text data fairly messy. In order to analyze the different messages fairly, we'll need to remove all the punctuation and uppercase letters from the samples.
#
# Create a function called `prepare_text` that takes a single parameter `text`, makes the text entirely lowercase, removes all the punctuation and returns a list of the words in the text in order.
#
# For example: `"Where did you go, friend? We nearly saw each other."` would become `['where', 'did', 'you', 'go', 'friend', 'we', 'nearly', 'saw', 'each', 'other']`.
# In[23]:
import string
def prepare_text(text):
    """Lowercase ``text``, strip punctuation and return its words in order.

    Example:
        "Where did you go, friend?" -> ['where', 'did', 'you', 'go', 'friend']
    """
    # The exercise requires fully lowercased output; the original skipped
    # .lower(), which made the word and ngram tables case-sensitive.
    cleaned = "".join(
        char for char in text.lower() if char not in string.punctuation
    )
    # split() with no argument collapses whitespace runs, so punctuation
    # removal can never leave empty-string "words" in the result.
    return cleaned.split()
# Update the constructor for `TextSample` to save the prepared text as `self.prepared_text`.
# ## Building A Frequency Table
#
# Now we want to see which words were most frequently used in each of the samples. Create a function called `build_frequency_table`. It takes in a list called `corpus` and creates a dictionary called `frequency_table`. For every element in `corpus` the value `frequency_table[element]` should be equal to the number of times that element appears in `corpus`. For example the input `['do', 'you', 'see', 'what', 'i', 'see']` would create the frequency table `{'what': 1, 'you': 1, 'see' 2, 'i': 1}`.
# In[24]:
def build_frequency_table(corpus):
    """Map each element of ``corpus`` to the number of times it appears.

    Example: ['do', 'you', 'see', 'what', 'i', 'see'] ->
             {'do': 1, 'you': 1, 'see': 2, 'what': 1, 'i': 1}

    Runs in O(n); the original called list.count() per element, O(n^2).
    """
    frequency_table = {}
    for element in corpus:
        frequency_table[element] = frequency_table.get(element, 0) + 1
    return frequency_table
# ## The Second Indicator: Favorite Words
#
# Use `build_frequency_table` with the prepared text to create a frequency table that counts how frequently all the words in each text sample appears. Call these functions in the constructor for `TextSample` and assign the word frequency table to a value called `self.word_count_frequency`.
# ## The Third Indicator: N-Grams
#
# An <a href='https://en.wikipedia.org/wiki/N-gram' target="_blank">n-gram</a> is a text analysis technique used for pattern recognition and applicable throughout linguistics. We're going to use n-grams to find who uses similar word-pairs to the murderer, and we think it's going to make our evidence strong enough to conclusively find the killer.
#
# Create a function called `ngram_creator` that takes a parameter `text_list`, a treated in-order list of the words in a text sample. `ngram_creator` should return a list of all adjacent pairs of words, styled as strings with a space in the center.
#
# For instance, calling `ngram_creator` with the input `['what', 'in', 'the', 'world', 'is', 'going', 'on']`
# Should produce the output `['what in', 'in the', 'the world', 'world is', 'is going', 'going on']`.
#
# These are two-word n-grams.
# In[25]:
def ngram_creator(text_list, n=2):
    """Return all adjacent n-word groups of ``text_list``, space-joined.

    Example (n=2): ['what', 'in', 'the'] -> ['what in', 'in the'].
    Returns [] when ``text_list`` has fewer than ``n`` words.

    Bug fix: the original iterated over *every* start index, so the final
    n-1 "ngrams" were truncated (the last one being a lone word), which
    contradicts the documented example output.
    """
    return [
        " ".join(text_list[start:start + n])
        for start in range(max(0, len(text_list) - n + 1))
    ]
# Use `ngram_creator` along with the prepared text to create a list of all the two-word ngrams in each `TextSample`. Use `build_frequency_table` to tabulate the frequency of each ngram. In the constructor for `TextSample` save this frequency table as `self.ngram_frequency`.
# ## Comparing Two Frequency Tables
#
# We want to know how similar two frequency tables are, let's write a function that computes the comparison between two frequency tables and scores them based on similarity.
#
# Write a function called `frequency_comparison` that takes two parameters, `table1` and `table2`. It should define two local variables, `appearances` and `mutual_appearances`.
#
# Iterate through `table1`'s keys and check if `table2` has the same key defined. If it is, compare the two values for the key -- the smaller value should get added to `mutual_appearances` and the larger should get added to `appearances`. If the key doesn't exist in `table2` the value for the key in `table1` should be added to `appearances`.
#
# Remember afterwards to iterate through all of `table2`'s keys that aren't in `table1` and add those to `appearances` as well.
#
# Return a frequency comparison score equal to the mutual appearances divided by the total appearances.
# In[26]:
def frequency_comparison(table_1, table_2):
    """Score the similarity of two frequency tables as a float in [0, 1].

    For each key present in both tables, the smaller count is "mutual" and
    the larger contributes to the total appearances; a key present in only
    one table contributes its full count to the total only.  The score is
    mutual / total: identical tables score 1.0, disjoint tables 0.0.

    The original implementation compared keys against ``dict.items()``
    (always a miss), double-counted via nested loops, and returned a ratio
    of list *lengths* rather than summed counts.
    """
    appearances = 0
    mutual_appearances = 0
    for key, count in table_1.items():
        if key in table_2:
            other = table_2[key]
            mutual_appearances += min(count, other)
            appearances += max(count, other)
        else:
            appearances += count
    # Keys that exist only in table_2 still dilute the similarity score.
    for key, count in table_2.items():
        if key not in table_1:
            appearances += count
    if appearances == 0:
        # Two empty tables are trivially identical; also avoids ZeroDivisionError.
        return 1.0
    return mutual_appearances / appearances
# ## Comparing Average Sentence Length
#
# In order to calculate the change between the average sentence lengths of two `TextSamples` we're going to use the formula for the percent difference.
#
# Write a function called `percent_difference` that returns the percent difference as calculated from the following formula:
#
# $$\frac{|\ value1 - value2\ |}{\frac{value1 + value2}{2}}$$
#
# In the numerator is the absolute value (use `abs()`) of the two values subtracted from each other. In the denominator is the average of the two values (value1 + value2 divided by two).
# In[27]:
def percent_difference(textsample_1, textsample_2):
    """Percent difference between two samples' average sentence lengths.

    Computed as |a - b| / ((a + b) / 2), where a and b are the samples'
    ``average_sentence_length`` attributes.  Returns 0.0 for identical
    averages; larger values mean more dissimilar sentence lengths.
    """
    first = textsample_1.average_sentence_length
    second = textsample_2.average_sentence_length
    gap = abs(first - second)
    mean = abs((first + second) / 2)
    return gap / mean
# ## Scoring Similarity with All Three Indicators
#
# We want to figure out who did it, so let's use all three of the indicators we built to score text similarity. Define a function `find_text_similarity` that takes two `TextSample` arguments and returns a float between 0 and 1 where 0 means completely different and 1 means the same exact sample. You can evaluate the similarity by the following criteria:
#
# - Calculate the percent difference of their average sentence length using `percent_difference`. Save that into a variable called `sentence_length_difference`. Since we want to find how _similar_ the two passages are calculate the inverse of `sentence_length_difference` by using the formula `abs(1 - sentence_length_difference)`. Save that into a variable called `sentence_length_similarity`.
# - Calculate the difference between their word usage using `frequency_comparison` on both `TextSample`'s `word_count_frequency` attributes. Save that into a variable called `word_count_similarity`.
# - Calculate the difference between their two-word ngram using `frequency_table` on both `TextSample`'s `ngram_frequency` attributes. Save that into a variable called `ngram_similarity`.
# - Add all three similarities together and divide by 3.
# In[28]:
def find_text_similarity(text_sample_1, text_sample_2):
    """Score how similar two TextSamples are (0 = different, 1 = identical).

    Averages three indicators: sentence-length similarity, shared word
    usage, and shared two-word n-gram usage, then returns a human-readable
    summary string describing ``text_sample_2``'s score against the note.
    """
    # BUG FIX: the original called percent_difference(sample_1, sample_1),
    # comparing the first sample with itself, so the sentence-length
    # indicator was a perfect 1.0 for every suspect.
    sentence_length_difference = percent_difference(text_sample_1, text_sample_2)
    sentence_length_similarity = abs(1 - sentence_length_difference)
    word_count_similarity = frequency_comparison(
        text_sample_1.word_count_frequency, text_sample_2.word_count_frequency
    )
    # ngram_frequency holds the raw ngram *list*, so tabulate it here.
    ngram_similarity = frequency_comparison(
        build_frequency_table(text_sample_1.ngram_frequency),
        build_frequency_table(text_sample_2.ngram_frequency),
    )
    similarity_score = round(
        (sentence_length_similarity + word_count_similarity + ngram_similarity) / 3, 5
    )
    return "The text sample written by {} has a similarity score of {} in comparison with the murder note".format(
        text_sample_2.author, similarity_score
    )
# ## Rendering the Results
#
# We want to print out the results in a way that we can read! For each contestant on _A Brand New Jay_ print out the following:
#
# - Their name
# - Their similarity score to the murder letter
# In[29]:
# NOTE(review): run as a plain script these calls discard their return values;
# in the original notebook each cell displayed the returned string.
find_text_similarity(murderer_sample, lily_sample)
# In[30]:
find_text_similarity(murderer_sample, myrtle_sample)
# In[31]:
find_text_similarity(murderer_sample, gregg_sample)
# # Who Dunnit?
#
# In the cell below, print the name of the person who killed Jay Stacksby.
# In[ ]:
#Lily Trebuchet
| [
"noreply@github.com"
] | noreply@github.com |
4a1fc4dc9297f3161f4f30e0492a815011a04b8c | 747012e5b750cdc67748798c09b3ce1eb819568f | /strategy/migrations/0002_auto_20170703_1645.py | 3a98d12dd70048ac2070500f701c0c01dc044e67 | [
"MIT"
] | permissive | moshthepitt/probsc | da30c3829d5b8bf42804950320f006c78d2b94aa | 9b8cab206bb1c41238e36bd77f5e0573df4d8e2d | refs/heads/master | 2020-06-06T11:46:05.573933 | 2018-01-10T20:42:51 | 2018-01-10T20:42:51 | 192,730,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-03 13:45
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('strategy', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='objective',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='strategy.Objective', verbose_name='Contributes to'),
),
]
| [
"kelvin@jayanoris.com"
] | kelvin@jayanoris.com |
e3235c3eb5d23881de4ad6b62ddf588213108fd3 | 78c010f824d1c06dd9e4d598f60a11f6eca5cefc | /src/relay.py | 54842e501631ba7e7e43ab2a57aed3ea7ac4f01c | [] | no_license | AcidLeroy/sous-vide | 250db4b7f8e73facc69c2ec33abb22ad68d23e75 | 1e983a28df8785e00555b0562c51d736d61d686a | refs/heads/master | 2020-03-09T06:49:14.646638 | 2018-05-06T18:08:27 | 2018-05-06T18:08:27 | 128,649,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/env python3
import RPi.GPIO as GPIO
from sous_videdb import SousVideDB
import time
import json
class Relay(object):
    """Wraps a single GPIO-controlled relay whose desired state lives in the DB."""
    # Maps the desired boolean state to the GPIO level to drive.
    state = {True: GPIO.HIGH, False: GPIO.LOW}
    def __init__(self, db, pin=26):
        print('Using pin {} for relay'.format(pin))
        self.pin = pin
        self.db = db
        # BCM numbering: `pin` is the Broadcom GPIO number, not the header position.
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.OUT)
        # Initially set the relay to be off
        self.db.relay_on = False
    def update(self):
        """
        Read what the desired state is set to in the db and then write it to the device
        """
        # NOTE(review): the constructor assigns a bare bool to db.relay_on but this
        # reads element [1] - presumably the DB property returns a (timestamp, value)
        # pair on read; confirm against SousVideDB.
        val = self.db.relay_on[1]
        GPIO.output(self.pin, Relay.state[val])
        print('Setting relay on to: ', val)
def main():
    """Poll the database once per second and mirror the desired relay state to the GPIO pin."""
    # Use a context manager so the config file handle is closed promptly;
    # the original `json.load(open('config.json'))` leaked the descriptor.
    with open('config.json') as cfg_file:
        config = json.load(cfg_file)
    with SousVideDB() as db:
        pin = config['Relay']['CTRL']  # BCM pin number driving the relay
        r = Relay(pin=pin, db=db)
        while True:
            r.update()
            time.sleep(1.0)
if __name__ == '__main__':
    main()
| [
"cody.eilar@gmail.com"
] | cody.eilar@gmail.com |
b413505e73ee1e63e3b0f74a2c539b38c038f42d | c3d4bf47677a39bd9578e666419936e3e3f6f2c9 | /ex16v2.py | b1ee47d3e53e9ce856824295f412d83c8794edb6 | [] | no_license | michaelmuttiah/learnpythonthehardway | 6672831088c75177dacefd190a8535d700d1d3f2 | bc6537adcb0983c92b5fb59569c7c56eec3832d6 | refs/heads/master | 2020-03-21T20:52:18.980430 | 2018-07-10T16:24:02 | 2018-07-10T16:24:02 | 139,033,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from sys import argv
script, filename = argv
print "We're going to erase %r." % filename
print "If you don't want that hit CTRL-C (^C)."
print "If you want that hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename, 'w')
# w = Writing onto the file (filename) which you have opened
# Or in others words making the file writeable
print "Truncating the file. Goodbye!"
target.truncate()
# truncating empties the file, so its blank
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file"
target.write(line 1, line 2, line 3)
# \n = new line
print "And finally, we close it."
target.close()
# best practise we close the file
| [
"noreply@github.com"
] | noreply@github.com |
a25245a35cacaea636067ccaec32d3b7094f710e | e5c9fc4dc73536e75cf4ab119bbc642c28d44591 | /src/leetcodepython/math/hamming_distance_461.py | 6ee39b31c590979bec6f64edd79227ce8fd40f94 | [
"MIT"
] | permissive | zhangyu345293721/leetcode | 0a22034ac313e3c09e8defd2d351257ec9f285d0 | 50f35eef6a0ad63173efed10df3c835b1dceaa3f | refs/heads/master | 2023-09-01T06:03:18.231266 | 2023-08-31T15:23:03 | 2023-08-31T15:23:03 | 163,050,773 | 101 | 29 | null | 2020-12-09T06:26:35 | 2018-12-25T05:58:16 | Java | UTF-8 | Python | false | false | 1,473 | py | # encoding='utf-8'
'''
/**
* This is the solution of No. 461 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/hamming-distance/
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 两个整数之间的汉明距离指的是这两个数字对应二进制位不同的位置的数目。
* <p>
* 给出两个整数 x 和 y,计算它们之间的汉明距离。
* <p>
* 注意:
* 0 ≤ x, y < 231.
* <p>
* 示例:
* <p>
* 输入: x = 1, y = 4
* <p>
* 输出: 2
* <p>
* 解释:
* 1 (0 0 0 1)
* 4 (0 1 0 0)
* ↑ ↑
* <p>
* 上面的箭头指出了对应二进制位不同的位置。
* <p>
* 来源:力扣(LeetCode)
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/'''
class Solution:
def hamming_distance(self, x: int, y: int) -> int:
'''
汉明距离
Args:
x: 数值x
y: 数值y
Returns:
距离
'''
c = x ^ y
res = 0
while c > 0:
res += (c & 1)
c = c >> 1
return res
if __name__ == '__main__':
x = 1
y = 4
solution = Solution()
res = solution.hamming_distance(x, y)
print(res)
assert res == 2
| [
"zhangyu_xtb@geekplus.cc"
] | zhangyu_xtb@geekplus.cc |
0f4787e023609643731531af8d73e021450dd660 | ca4e57a6861f1e24d1521bf5b775aee3b6db7725 | /bitonic.py | 47601639559708727cbcf4862e71d39937310f86 | [] | no_license | mathi98/madhu | e296a477f3684a596c74a228c9ce867f1f60c3f8 | cae2adb19ccf7c7f12212d694cd0d09614cd5d81 | refs/heads/master | 2020-05-23T01:06:54.830389 | 2019-06-28T14:13:07 | 2019-06-28T14:13:07 | 186,582,298 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | k=int(input())
# Read the sequence and print its maximum element.
# NOTE(review): the count `k` read on the previous line is never used here -
# presumably it is the declared element count; confirm against the problem statement.
a=[int(x) for x in input().split()]
print(max(a))
| [
"noreply@github.com"
] | noreply@github.com |
6b06e49f2dd1f681c22a44a5efc76c2112241cab | d574a7f81ac56fad4cce119f720473f66bf739e7 | /chunkpailndrom.py | 5e9cc5b9d36c6c4cbae25938a671d3988cce1c8b | [] | no_license | satyavank1988/Learning | 6c4ebfc6a6335407c388485497969e86dad179eb | a2fb71c250121c348ea7e69b3a7c250b92d5f118 | refs/heads/master | 2020-04-14T07:56:11.016567 | 2019-01-01T09:08:35 | 2019-01-01T09:08:35 | 163,725,112 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # A O(n^2) time and O(1) space program to find the
#longest palindromic substring
# This function prints the longest palindrome substring (LPS)
# of str[]. It also returns the length of the longest palindrome
def longestPalSubstr(string):
    # Center-expansion search: for every position, grow outwards from both the
    # even-length center (i-1, i) and the odd-length center i, tracking the
    # longest palindrome seen. O(n^2) time, O(1) extra space. Prints the
    # substring and returns its length. (Python 2 code: xrange/print statement.)
    maxLength = 1
    start = 0
    length = len(string)
    low = 0
    high = 0
    # One by one consider every character as center point of
    # even and length palindromes
    for i in xrange(1, length):
        # Find the longest even length palindrome with center
        # points as i-1 and i.
        low = i - 1
        high = i
        while low >= 0 and high < length and string[low] == string[high]:
            if high - low + 1 > maxLength:
                start = low
                maxLength = high - low + 1
            low -= 1
            high += 1
        # Find the longest odd length palindrome with center
        # point as i
        low = i - 1
        high = i + 1
        while low >= 0 and high < length and string[low] == string[high]:
            if high - low + 1 > maxLength:
                start = low
                maxLength = high - low + 1
            low -= 1
            high += 1
    print "Longest palindrome substring is:",
    print string[start:start + maxLength]
    return maxLength
# Driver program to test above functions
string = "forgeeksskeegfor"
print "Length is: " + str(longestPalSubstr(string))
| [
"noreply@github.com"
] | noreply@github.com |
3637a41ea27d8219504f33dd65eda2ea0971739d | dd256415176fc8ab4b63ce06d616c153dffb729f | /aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/Assigment_5_01-Aug-2019/Assigment_5_5.py | 24aa63c26add06b9baeb2c0235963e5db861b091 | [] | no_license | adityapatel329/python_works | 6d9c6b4a64cccbe2717231a7cfd07cb350553df3 | 6cb8b2e7f691401b1d2b980f6d1def848b0a71eb | refs/heads/master | 2020-07-24T17:15:39.839826 | 2019-09-12T07:53:28 | 2019-09-12T07:53:28 | 207,993,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | def accept():
name = input("Enter your string : ")
val= []
for i in name:
val.append(ord(i))
print(sum(val)/len(val))
accept()
| [
"aditya.patel@1rivet.local"
] | aditya.patel@1rivet.local |
0a6b2674de4d3094d067198aada095d0fcb8b7c6 | f23da0e02e49634e1a7401190af0bf57869b806c | /ML_Visualizations.py | cedb9fa656bb694255d338f421669f6700db73a4 | [] | no_license | gjones1911/ANN | 32522bfd5fd10f280e28beb5fe04ff7701c5cf59 | 63d43bb02d6873d89e8b0ee83b027c8b5e644b48 | refs/heads/master | 2020-04-20T16:53:25.392858 | 2019-02-03T17:29:40 | 2019-02-03T17:29:40 | 168,972,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,463 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy
import matplotlib.animation as animation
# ---------------------------------------------------------------------------------------------
# --------------------- Utility functions --------------------------------------------------
# ---------------------------------------------------------------------------------------------
def make_color(clrs, hnew, gnum):
    """Blend the base colors `clrs`, weighted by group `gnum`'s membership row.

    Parameters
    ----------
    clrs : sequence of RGB triples (one base color per group).
    hnew : 2-D array-like of membership weights; row `gnum` is used.
    gnum : index of the group whose weights drive the blend.

    Returns
    -------
    list of 3 floats: sum_j clrs[j] * hnew[gnum][j] * 0.5
    """
    # BUG FIX: `numpy.float` was removed in NumPy 1.24; the builtin `float`
    # is the documented replacement and yields the same float64 dtype.
    c = numpy.array([0, 0, 0], dtype=float)
    colors = numpy.array(clrs, dtype=float)
    for j in range(len(hnew[0])):
        c += colors[j] * hnew[gnum][j] * .5
    # tolist() already returns a plain list; the extra list() wrapper was redundant.
    return c.tolist()
def calculate_m_b(x1, y1, x2, y2):
    """Return (slope, intercept) of the line through (x1, y1) and (x2, y2)."""
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - slope * x1
    return slope, intercept
def calculate_y(x, m, b):
    """Evaluate the line y = m*x + b at the given x."""
    return b + m * x
def calculate_x(y, m, b):
    """Invert y = m*x + b: return the x at which the line reaches y."""
    rise = y - b
    return rise / m
def line_calc_x(x1, y1, x2, y2, new_y):
    """Return (rounded to int) the x where the line through the two points hits new_y."""
    slope, intercept = calculate_m_b(x1, y1, x2, y2)
    raw_x = calculate_x(new_y, slope, intercept)
    return int(numpy.around(raw_x, 0))
def generic_xy_plot(x_a, y_a, title=None, x_ax=None, y_ax=None, labels=None, legend_names=None,
                    marker=None, figure=1):
    """Draw a single x/y series on figure `figure` and display it.

    `marker` is any matplotlib format string (defaults to a solid blue line).
    `labels` and `legend_names` are accepted for interface compatibility but
    are not used by this routine.
    """
    fmt = 'b-' if marker is None else marker
    plt.figure(figure)
    plt.title(title)
    plt.xlabel(x_ax)
    plt.ylabel(y_ax)
    plt.plot(x_a, y_a, fmt)
    plt.show()
    return
def ani_generic_xy_plot(x_a, y_a, title=None, x_ax=None, y_ax=None, labels=None, legend_names=None,
                    marker=None, figure=1):
    """Animate a single x/y series, revealing one data point per frame.

    NOTE(review): axis limits are hard-coded to (0, 11) x (0, 11**2), which
    only suits data in that range - confirm before reuse. `labels`,
    `legend_names` and `figure` are accepted but unused here.
    """
    if marker is None:
        marker = 'b-'
    fig, ax = plt.subplots()
    plt.title(title)
    plt.xlabel(x_ax)
    plt.ylabel(y_ax)
    # Growing buffers shared with the animate() closure below.
    xdata, ydata = list(), list()
    line, = plt.plot(list(), list(), marker, animated=True)
    #line, = plt.plot(x_a, y_a, marker, animated=True)
    def init():
        # line.set_ydata([numpy.nan] * len(x_a))
        ax.set_xlim(0, 11)
        ax.set_ylim(0, 11**2)
        return line,
    def animate(i):
        #print(i)
        #xdata.append(i)
        #ydata.append(i**2)
        # Append the i-th sample and redraw; buffers are cleared after the
        # final frame so the animation can loop from an empty plot.
        xdata.append(x_a[i])
        ydata.append(y_a[i])
        line.set_data(xdata, ydata)
        if i == len(x_a)-1:
            xdata.clear()
            ydata.clear()
        return line,
    ani = animation.FuncAnimation(fig, animate, frames=numpy.arange(0,len(x_a)), init_func=init, blit=True)
    plt.show()
    return
def ani_multi_xy_plot(x_a, y_a, title=None, x_ax=None, y_ax=None, labels=None, legend_names=None,
                      marker=None, figure=1, xlim=10, ylim=10**2):
    """Animate several x/y series in lockstep, one point per series per frame.

    `x_a`/`y_a` are lists of series (same lengths). NOTE(review): animate()
    returns exactly lines[0] and lines[1], so blitting assumes at least two
    series - confirm callers never pass fewer.
    """
    print('len x_a: ', len(x_a))
    print('len x_a[0]: ', len(x_a[0]))
    if marker is None:
        marker = list()
        marker.append('b-')
        marker.append('r-')
    fig, ax = plt.subplots()
    plt.title(title)
    plt.xlabel(x_ax)
    plt.ylabel(y_ax)
    # One growing (x, y) buffer and one Line2D artist per series.
    xdata, ydata = list(), list()
    lines = list()
    for i in range(len(x_a)):
        xdata.append(list())
        ydata.append(list())
        line, = plt.plot(list(), list(), marker[i], animated=True)
        lines.append(line)
    #line, = plt.plot(x_a, y_a, marker, animated=True)
    def init():
        ax.set_xlim(0, xlim)
        ax.set_ylim(0, ylim)
        return line,
    def animate(i):
        print('i: ',i)
        for x in range(len(x_a)):
            xdata[x].append(x_a[x][i])
            ydata[x].append(y_a[x][i])
            #line = lines[x]
            lines[x].set_data(xdata[x], ydata[x])
        # After the last frame, reset every buffer so the animation can loop.
        if i == len(x_a[0])-1:
            for a in range(len(x_a)):
                xdata[a].clear()
                #xdata[a].append(list())
                ydata[a].clear()
                #ydata[a].append(list())
        return lines[0], lines[1]
    ani = animation.FuncAnimation(fig, animate, frames=numpy.arange(0,len(x_a[0])), init_func=init, blit=True)
    plt.show()
    return
def multi_y_plotter(x_a, y_a, title='Multi Y Plot', leg_a = ['red','green','purple'], x_label='X',
                    y_label='Y', show_it=True):
    """Plot several y-series against a shared x-axis on one figure.

    Parameters
    ----------
    x_a : x values shared by every series.
    y_a : list of y-series; each must be the same length as x_a.
    leg_a : legend labels (default list is never mutated, so the shared
        mutable default is safe here).
    show_it : call plt.show() when True.

    Returns
    -------
    None on success, -1 when the lengths disagree (after printing a message).
    """
    x_len = len(x_a)
    if not y_a:
        # Previously an empty y_a crashed on y_a[0]; treat it as a length mismatch.
        print('x and y must be same length but x is {:d} and y is {:d}'.format(x_len, 0))
        return -1
    y_len = len(y_a[0])
    if x_len != y_len:
        print('x and y must be same length but x is {:d} and y is {:d}'.format(x_len, y_len))
        return -1
    plt.figure()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    # Cycle through a fixed palette. The old if/elif chain silently reused the
    # marker left over from the previous iteration for every series past the fifth.
    markers = ('r-', 'g-', 'm-', 'b-', 'y-')
    for cnt, y in enumerate(y_a):
        plt.plot(x_a, y, markers[cnt % len(markers)], linewidth=2)
    leg = plt.legend(leg_a, loc='best',
                     borderpad=0.3, shadow=False,
                     prop=matplotlib.font_manager.FontProperties(size='medium'),
                     markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    if show_it:
        plt.show()
    return
# ---------------------------------------------------------------------------------------------------------------------
# -------------------------------- ANN graphical methods -----------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# -------------------------------- k means clustering --------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def make_scree_graph_data(np_data_array, show_it=True):
    """Run an SVD on the data matrix, draw a scree plot, and return (u, s, vh, v).

    `v` is the transpose of `vh` (right singular vectors as columns).
    NOTE(review): the eigenvalues are normalized by cumsum(s)[-1] (sum of the
    singular values), not by sum(s**2) - confirm that is intentional.
    """
    u, s, vh = numpy.linalg.svd(np_data_array, full_matrices=True, compute_uv=True)
    v = numpy.transpose(vh)
    sum_s = sum(s.tolist())
    s_sum = numpy.cumsum(s)[-1]
    print('shape of s')
    print(s.shape)
    obs_var = np_data_array.shape
    num_obs = obs_var[0]
    num_var = obs_var[1]
    print('There are {:d} observations and {:d} variables or attributes'.format(num_obs, num_var))
    # Squared singular values act as (unnormalized) eigenvalues of the covariance.
    eigen_vals = s ** 2 /s_sum
    single_vals = numpy.arange(num_obs)
    fig = plt.figure(figsize=(8, 5))
    plt.plot(single_vals, eigen_vals, 'ro-', linewidth=2)
    plt.title('Scree Plot')
    plt.xlabel('Principal Component')
    plt.ylabel('Eigenvalue')
    # I don't like the default legend so I typically make mine like below, e.g.
    # with smaller fonts and a bit transparent so I do not cover up data, and make
    # it moveable by the viewer in case upper-right is a bad place for it
    leg = plt.legend(['Eigenvalues from SVD'], loc='best', borderpad=0.3,
                     shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                     markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    if show_it:
        plt.show()
    return u, s, vh, v
def make_scree_plot_usv(s, num_vars, show_it=True, last=False, k_val=3, annot=True):
    """Estimate the scree-plot 'elbow' from singular values and optionally plot it.

    Returns kret+1, where kret is the first index at which consecutive
    normalized eigenvalues agree to 2 decimal places (the flattening point).
    `k_val` is an externally supplied k (e.g. from proportion-of-variance)
    that is marked on the plot for comparison.
    """
    sum_s = sum(s.tolist())
    s_sum = numpy.cumsum(s**2)[-1]
    # Normalized eigenvalues: squared singular values over their total.
    eigen_vals = (s ** 2) / s_sum
    single_vals = numpy.arange(num_vars)
    kret = 0
    oldp = -900
    for i in range(1, num_vars-1):
        # Elbow heuristic: stop at the first near-zero drop between neighbors.
        if numpy.around((eigen_vals[i-1] - eigen_vals[i]), 2) == 0:
            #if (eigen_vals[i-1] - eigen_vals[i]) == 0:
            #print('prev', eigen_vals[i-1])
            #print('curnt', eigen_vals[i])
            #print('---------',eigen_vals[i]/eigen_vals[i-1])
            #crnt = (eigen_vals[i]/eigen_vals[i-1])
            #if crnt < oldp:
            #if (eigen_vals[i] - eigen_vals[i-1]) < .05:
            kret = i
            break
            #oldp = crnt
    print('k for sckree tis ', kret)
    if show_it:
        fig = plt.figure(figsize=(8, 5))
        plt.plot(single_vals, eigen_vals, 'ro-', linewidth=2)
        plt.title('Scree Plot')
        plt.xlabel('Principal Component')
        plt.ylabel('Eigenvalue')
        index_k = list(single_vals.tolist()).index(k_val)
        print('k_val',k_val)
        #plt.plot(k_val, eigen_vals[index_k], 'go')
        plt.plot(k_val, eigen_vals[k_val], 'go')
        plt.plot(kret, eigen_vals[kret], 'bo')
        leg = plt.legend(['# of PC\'s vs. Eigen values','k from POV','elbow estimate'], loc='best',
                         borderpad=0.3,shadow=False,
                         prop=matplotlib.font_manager.FontProperties(size='small'),
                         markerscale=0.4)
        leg.get_frame().set_alpha(0.4)
        leg.draggable(state=True)
        if last:
            plt.show()
    return kret+1
def make_prop_o_var_plot(s, num_obs, show_it=True, last_plot=True):
    """Plot cumulative proportion of variance and return the component count x2
    at which the cumulative proportion first reaches ~90% (rounded).

    `k_val` is interpolated with line_calc_x when the 90% mark falls between
    two components, but note the function ultimately returns x2.
    """
    sum_s = sum(s.tolist())
    ss = s**2
    sum_ss = sum(ss.tolist())
    prop_list = list()
    found = False
    k = 0
    x1, y1, x2, y2, = 0,0,0,0
    p_l, i_l = 0, 0
    found = False
    for i in range(1, num_obs+1):
        # Cumulative share of variance captured by the first i components.
        perct = sum(ss[0:i]) / sum_ss
        #perct = sum(s[0:i]) / sum_s
        if numpy.around((perct*100), 0) >= 90 and not found:
            # Remember the crossing segment: (x1, y1) just below 90%, (x2, y2) at/above.
            y2 = perct
            x2 = i
            x1 = i_l
            y1 = p_l
            found = True
        prop_list.append(perct)
        i_l = i
        p_l = perct
    if numpy.around(y2, 2) == .90:
        k_val = x2
    else:
        print('it is over 90%',x2)
        k_val = line_calc_x(x1, y1, x2, numpy.around(y2,2), .9)
    single_vals = numpy.arange(1,num_obs+1)
    if show_it:
        fig = plt.figure(figsize=(8, 5))
        plt.plot(single_vals, prop_list, 'ro-', linewidth=2)
        plt.title('Proportion of Variance, K should be {:d}'.format(x2))
        plt.xlabel('Eigenvectors')
        plt.ylabel('Prop. of var.')
        p90 = prop_list.index(y2)
        #plt.plot(k_val, prop_list[p90], 'bo')
        plt.plot(x2, prop_list[p90], 'bo')
        leg = plt.legend(['Eigenvectors vs. Prop. of Var.','90% >= variance'], loc='best', borderpad=0.3,
                         shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                         markerscale=0.4)
        leg.get_frame().set_alpha(0.4)
        leg.draggable(state=True)
        if last_plot:
            plt.show()
    return x2
def dual_scree_prop_var(s, num_obs):
    """Show a two-panel figure: scree plot (top) and cumulative proportion of
    variance (bottom), both computed from the singular values `s`.

    NOTE(review): here the proportion uses sum(s[0:i]) / sum(s) (raw singular
    values), unlike make_prop_o_var_plot which uses s**2 - confirm which
    convention is intended.
    """
    sum_s = sum(s.tolist())
    eigen_vals = s ** 2 /sum_s
    single_vals = numpy.arange(num_obs)
    prop_list = list()
    for i in range(1, num_obs + 1):
        prop_list.append(sum(s[0:i].tolist()) / sum_s)
    fig, axs = plt.subplots(2,1)
    #plt.figure(1)
    #fig = plt.figure(figsize=(8, 5))
    axs[0].plot(single_vals, eigen_vals, 'ro-', linewidth=2)
    plt.title('Scree Plot')
    #axs[0].title('Scree Plot')
    axs[0].set_xlabel('Principal Component')
    axs[0].set_ylabel('Eigenvalue')
    leg = axs[0].legend(['Eigenvalues from SVD'], loc='best', borderpad=0.3,
                        shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                        markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    #plt.figure(2)
    axs[1].plot(single_vals, prop_list, 'go-', linewidth=2)
    #plt.title('Proportion of Variance')
    axs[1].set_xlabel('Eigenvectors')
    axs[1].set_ylabel('Prop. of var.')
    leg = plt.legend(['Eigenvectors vs. Prop. of Var.'], loc='best', borderpad=0.3,
                     shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                     markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    '''
    plt.subplot(2,2,1)
    #fig = plt.figure(figsize=(8, 5))
    plt.plot(single_vals, prop_list, 'ro-', linewidth=2)
    plt.title('Proportion of Variance')
    plt.xlabel('Eigenvectors')
    plt.ylabel('Prop. of var.')
    leg = plt.legend(['Eigenvectors vs. Prop. of Var.'], loc='best', borderpad=0.3,
                     shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                     markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    '''
    plt.show()
    return
def basic_scatter_plot(x, y, x_label, y_label, title, legend):
    """Display a simple scatter plot of y against x with one legend entry."""
    plt.figure(figsize=(8, 5))
    plt.scatter(x, y)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    legend_font = matplotlib.font_manager.FontProperties(size='small')
    leg = plt.legend([legend], loc='best', borderpad=0.3, shadow=False,
                     prop=legend_font, markerscale=0.4)
    leg.get_frame().set_alpha(0.4)
    leg.draggable(state=True)
    plt.show()
    return
def z_scatter_plot(Z, schools, x_label='z1', y_label='z2', title='PC1 vs. PC2 for all Observations',
                   legend='z1 vs. z2', show_it=True, last=False, point_size=20, color=[[0,0,0]], annote=True):
    """Scatter the first two columns of Z (e.g. PC scores), one point per row,
    optionally annotating each point with its observation index.

    Nothing is drawn when show_it is False; plt.show() only fires when `last`
    is also True, so several plots can be layered before displaying.
    """
    if show_it:
        fig = plt.figure(figsize=(8, 5))
        i = 0
        for row in Z:
            z1 = row[0]
            z2 = row[1]
            plt.scatter(z1, z2, s=point_size, c=color)
            if annote:
                # Annotate with the positional index of the i-th school label.
                plt.annotate(schools.index(schools[i]), (z1, z2))
            i += 1
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        leg = plt.legend([legend], loc='best', borderpad=0.3,
                         shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
                         markerscale=0.4)
        leg.get_frame().set_alpha(0.4)
        leg.draggable(state=True)
        if last:
            plt.show()
    return
def k_cluster_scatter_plot(Z, schools, mid, groups, x_label='x1', y_label='x2', title='PC1 vs. PC2 for all Observations',
                   legend='z1 vs. z2', show_it=True, colors=[[.8, .4, .2]], b_list=[] ,g_ids = {},
                   show_center=True, last=False, groups_l=[], em=False, hnew=numpy.array([]), an_type=0,
                   annote=True):
    """Scatter clustered observations (first two columns of Z), colored/marked
    by cluster membership, with optional cluster-center bubbles.

    `b_list` holds one one-hot membership vector per observation; `mid` holds
    the 2-D cluster centers; `hnew` (soft memberships) scales the center
    bubbles when provided. NOTE(review): points are only drawn when b_list is
    non-empty - confirm that callers always pass it when plotting points.
    """
    row_mid = len(mid)
    markers_s = list(['o','^','s','*'])
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    i = 0
    for row in Z:
        z1 = row[0]
        z2 = row[1]
        if len(b_list) > 0:
            # grab group i call it l
            l = list(b_list[i].tolist())
            # look for what group this observation is a part of
            # call the group number midx
            #midx = l.index(1) % len(colors)
            grpnm = l.index(1)
            midx = grpnm % 9
            m = markers_s[grpnm%4]
            #print(l.index(1))
            '''
            if not em:
                if schools.index(schools[i]) == -22:
                    ax.scatter(z1, z2, s=30, c=[255/255, 144/255, 18/255])
                else:
                    ax.scatter(z1, z2, s=20, c=colors[midx])
            else:
                if schools.index(schools[i]) == -22:
                    ax.scatter(z1, z2, s=30, c=[255/255, 144/255, 18/255])
                else:
                    ax.scatter(z1, z2, s=20, c=colors[midx])
            '''
            #ax.scatter(z1, z2, s=20, c=colors[midx])
            ax.scatter(z1, z2, s=20, c=colors[midx], marker=m)
            if annote:
                if len(groups_l) > 0:
                    ax.annotate(groups_l[i], (z1, z2))
                elif len(b_list[i]) > 100:
                    ax.annotate(grpnm, (z1, z2))
                else:
                    ax.annotate(schools.index(schools[i]), (z1, z2))
        i += 1
    if show_center:
        #r_c = b_list.shape
        #bii = list()
        i = 0
        # for row in mid:
        # Draw one translucent bubble per center; when soft memberships are
        # available, nest max/median/min-scaled bubbles around each center.
        for row, color in zip(mid, colors):
            m1 = row[0]
            m2 = row[1]
            if len(hnew) > 0:
                gmx = numpy.max(hnew[:, i])
                gmd = numpy.median(hnew[:, i])
                gmn = numpy.min(hnew[:, i])
                glist = list([gmx, gmd,gmn])
                #glist = list([gmn, gmd,gmx])
                for m in range(len(mid)):
                    ax.scatter(m1, m2, s=3000*(glist[m]), c=color, alpha=(1/(m+1))*glist[m-1])
            else:
                ax.scatter(m1, m2, s=3000, c=color, alpha=.5)
            # ax.annotate(groups[i], (m1, m2), arrowprops=dict(facecolor='black', shrink=1.05))
            ax.annotate(groups[i], (m1, m2))
            i += 1
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    #leg = plt.legend(legend, loc='best', borderpad=0.3,
    #                shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
    #                markerscale=0.4, )
    #leg.get_frame().set_alpha(0.4)
    #leg.draggable(state=True)
    if last:
        plt.show()
    return
| [
"gjones2@vols.utk.edu"
] | gjones2@vols.utk.edu |
9fb6a68ceb3cf80621af5ba80af61427c4540b14 | e1450725c9637e15709064aaa48bc4e053a213d5 | /tests/test_funcptrdecl.py | a4d3a4d89874a4fe3280f0584e431cc6717bed5d | [] | no_license | gotoc/PyCParser-1 | 9d4e4c40a8c24923a689b1a0e3ebd4f07528d75b | b00cdd67a688792c0bc49b383a36199c50cc5cf2 | refs/heads/master | 2021-01-20T10:54:25.196102 | 2014-09-11T12:27:29 | 2014-09-11T12:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import sys
sys.path += [".."]
from pprint import pprint
from cparser import *
import test
testcode = """
int16_t (*f)();
int16_t (*g)(char a, void*);
int (*h);
// ISO/IEC 9899:TC3 : C99 standard
int fx(void), *fip(), (*pfi)(); // example 1, page 120
int (*apfi[3])(int *x, int *y); // example 2, page 120
int (*fpfi(int (*)(long), int))(int, ...); // example 3, page 120
"""
# Parse the C declarations above and assert the resulting symbol table shapes.
state = test.parse(testcode)
# f / g: plain function pointers with zero and two parameters respectively.
f = state.vars["f"]
g = state.vars["g"]
assert f.name == "f"
assert isinstance(f.type, CFuncPointerDecl)
assert f.type.type == CStdIntType("int16_t")
assert f.type.args == []
assert isinstance(g.type, CFuncPointerDecl)
gargs = g.type.args
assert isinstance(gargs, list)
assert len(gargs) == 2
assert isinstance(gargs[0], CFuncArgDecl)
assert gargs[0].name == "a"
assert gargs[0].type == CBuiltinType(("char",))
assert gargs[1].name is None
assert gargs[1].type == CBuiltinType(("void","*"))
# h: a plain pointer-to-int, not a function pointer.
h = state.vars["h"]
assert h.type == CPointerType(CBuiltinType(("int",)))
# C99 standard examples (page 120): mixed declarators in one declaration.
fx = state.funcs["fx"] # fx is a function `int (void)`
assert fx.type == CBuiltinType(("int",))
assert fx.args == []
fip = state.funcs["fip"] # fip is a function `int* (void)`
assert fip.type == CPointerType(CBuiltinType(("int",)))
assert fip.args == []
pfi = state.vars["pfi"] # pfi is a function-ptr to `int ()`
assert isinstance(pfi.type, CFuncPointerDecl)
assert pfi.type.type == CBuiltinType(("int",))
assert pfi.type.args == []
apfi = state.vars["apfi"] # apfi is an array of three function-ptrs `int (int*,int*)`
# ...
fpfi = state.funcs["fpfi"] # function which returns a func-ptr
# the function has the parameters `int(*)(long), int`
# the func-ptr func returns `int`
# the func-ptr func has the parameters `int, ...`
| [
"albert.zeyer@rwth-aachen.de"
] | albert.zeyer@rwth-aachen.de |
3f8985be40b438a6f727f99ad4571962005a54c1 | 919388c3d7561937a98520d9c982f595422581a3 | /demo4.py | f105e3c23fb7178bab3b91e2455eab0b5c1d1949 | [] | no_license | WangJie1994/tensorflow_temp | 2325b94f7709d23bab211964ca58104f6056f008 | 70af730c859ff32939f6c62e2e191fd5d6991a9c | refs/heads/master | 2020-03-16T19:24:45.411913 | 2018-05-14T01:17:33 | 2018-05-14T01:17:33 | 132,914,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 9 14:05:31 2018
@author: wangj
"""
import tensorflow as tf
# TensorFlow 1.x graph-mode demo: feed two scalars through a multiply op.
# NOTE(review): tf.placeholder/tf.Session were removed in TF 2.x - this only
# runs on TF 1.x (or via tf.compat.v1).
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    # Expected to print [35.] (7 * 5).
    print(sess.run(output, feed_dict={input1:[7], input2:[5]}))
| [
"wangjie19940416@icloud.com"
] | wangjie19940416@icloud.com |
b25c5fbb4c2f172d0c011800ca4eb22f6ea0927e | 7d0e35a6d9b188eed2c29ff0d71116e00679c4b6 | /source/webapp/migrations/0003_delete_favorites.py | 73f9b66f7eaef457fe146ecf57ae40d30cba3873 | [] | no_license | neroznik/exam_9 | 11bd47fe09d527943c5d145ce6ae4c0d6376ae73 | ad37fd72cc8ca16bdd5bc6f83e8f01e1b6d02196 | refs/heads/main | 2023-01-08T22:11:11.275006 | 2020-10-24T13:31:53 | 2020-10-24T13:31:53 | 306,645,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # Generated by Django 2.2 on 2020-10-24 08:46
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the Favorites model/table.
    # Do not edit by hand beyond comments; Django tracks it by filename.
    dependencies = [
        ('webapp', '0002_auto_20201024_0823'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Favorites',
        ),
    ]
| [
"neroznik@yandex.ru"
] | neroznik@yandex.ru |
f9e2cfae13bab1f5c5f8fabdf35dd52c318b1683 | a8a923b9a40190989f35b14a72fb6c683bf99201 | /customer_clustering_insurance_company/helperFunctions.py | 06979bf6d149f0205e548a4e1ce8f4d8db3b574b | [] | no_license | mrmachatschek/data_science_portfolio | 12d8f8a38c4dab6f55efbcd6e87e5e3b3d7bb7ef | 722a659ba5cb1a81c50bb1a8b30579d1524de6e6 | refs/heads/master | 2020-09-08T17:45:18.785904 | 2020-05-08T12:35:04 | 2020-05-08T12:35:04 | 221,199,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from kmodes.kprototypes import KPrototypes
from sklearn.cluster import KMeans
import matplotlib.cm as cm
from sklearn.cluster import AgglomerativeClustering
import scipy.cluster.hierarchy as shc
from sklearn.metrics import silhouette_samples
from sklearn.metrics import silhouette_score #avg of avgs
def get_outliers_i(df_num, column, threshold, direction="pos"):
    """Return the index labels of rows whose `column` value lies beyond `threshold`.

    Parameters
    ----------
    df_num : pandas.DataFrame of numeric columns.
    column : column name to test.
    threshold : cut-off value; a threshold of 0 disables detection entirely
        (returns an empty list, preserving the original convention).
    direction : "pos" selects values above the threshold, "neg" below it.

    Returns
    -------
    numpy array of index labels (or [] when threshold == 0).

    Raises
    ------
    ValueError for an unknown direction (previously this surfaced as an
    UnboundLocalError at the return statement).
    """
    if threshold == 0:
        return []
    if direction == "pos":
        return df_num[df_num[column] > threshold].index.values
    if direction == "neg":
        return df_num[df_num[column] < threshold].index.values
    raise ValueError("direction must be 'pos' or 'neg', got %r" % (direction,))
def create_silgraph(df, labels):
    """Draw a silhouette plot: one horizontal band per cluster, showing the
    sorted per-sample silhouette values for that cluster.

    `df` is the feature matrix and `labels` the cluster assignment per row.
    The figure is created but not shown; callers decide when to display it.
    """
    sample_silhouette_values = silhouette_samples(df, labels )
    n_clusters = len(np.unique(labels))
    y_lower = 100
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    for i in range(n_clusters):
        ith_cluster_silhouette_values = sample_silhouette_values[labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i=ith_cluster_silhouette_values. shape[0]
        # NOTE(review): the next line is duplicated; the second assignment is redundant.
        y_upper = y_lower + size_cluster_i
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),0, ith_cluster_silhouette_values,facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10 # 10 for the 0 samples
def create_elbowgraph(n, df, type="kmeans", categorical=[0]):
    """Plot an elbow curve for 1..n-1 clusters using k-means or k-prototypes.

    `type` selects the algorithm ("kmeans" uses inertia, "kproto" uses cost;
    `categorical` lists the categorical column indices for k-prototypes).
    NOTE(review): any other `type` leaves `clusters` unbound and raises
    NameError at the plotting step - confirm callers only pass these two.
    """
    if type == "kmeans":
        clusters = []
        for i in range(1,n):
            kmeans = KMeans(n_clusters=i, random_state=1).fit(df)
            clusters.append(kmeans.inertia_)
            print("Calculated kmeans with " + str(i) + " clusters")
    elif type == "kproto":
        clusters = []
        for i in range(1,n):
            kproto = KPrototypes(n_clusters=i, init='random', random_state=1).fit(df, categorical=categorical)
            clusters.append(kproto.cost_)
            print("Calculated kproto with " + str(i) + " clusters")
    plt.plot(range(1,n), clusters,'go--')
    plt.title("Elbow graph for customer features")
    plt.xlabel("Number of cluster")
    plt.ylabel("within-cluster sum-of-squares (inertia)")
"m20190054@novaims.unl.pt"
] | m20190054@novaims.unl.pt |
688cb82adce9a0a1f2e93bdc547a6257b3b1a89a | db9cfe9c9eae6883d06dd57ff79a74c743072c7c | /terminal.py | 837936575f059c203d856f5592f6d6d8bd3852d9 | [
"MIT"
] | permissive | Louis-Navarro/sudoku | 66c9848bde772a1cdfe9ed19ed62a904a5a7dd35 | a29d518f42a9e2bf93f29fc7916bcd28ac48fd30 | refs/heads/master | 2020-11-25T21:28:09.440313 | 2020-02-13T19:10:50 | 2020-02-13T19:10:50 | 228,851,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | from colorama import Fore, init
import general
import solver
init()
RED = Fore.RED
GREEN = Fore.GREEN
BLUE = Fore.BLUE
RESET = Fore.RESET
def print_grid(hidden_grid, showed_grid):
    """Print the 9x9 sudoku board with 3x3 box spacing.

    Cells that were blank in `hidden_grid` (value 0, i.e. player-editable)
    are printed in green; fixed clues are printed in red.
    """
    for r in range(9):
        for c in range(9):
            cell = (r, c)
            colour = GREEN if hidden_grid[cell] == 0 else RED
            print(colour, showed_grid[cell], end='')
            # Extra gap after each 3-column box.
            if c in (2, 5, 8):
                print(' ', end=' ')
            # Blank line after each 3-row box.
            if r in (2, 5, 8) and c == 8:
                print()
            # Reset the color at the end of every row.
            if c == 8:
                print(RESET)
def main():
    """Interactive sudoku loop: show the board, take row/col/number input,
    and stop when the grid is complete or the player enters row 0 to give up.
    """
    initial_grid = general.create_grid()
    # hidden_grid keeps the immutable clues (0 = editable); showed_grid is the
    # player's working copy; solved is the computer's reference solution.
    hidden_grid = general.hide_grid(initial_grid.copy())
    showed_grid = hidden_grid.copy()
    solved = solver.solve(hidden_grid.copy())
    while not general.check_grid(showed_grid):
        print_grid(hidden_grid, showed_grid)
        print(f'{RED}RED {RESET}= Immutable')
        print(f'{GREEN}GREEN {RESET}= Mutable')
        print(f'{BLUE}Row 0 {RESET}= Solution')
        # Inputs are 1-based on screen, 0-based internally; row 0 -> -1 = abandon.
        row = int(input('What row do you want to change : ')) - 1
        if row == -1:
            print(f'You abandoned, the computer found :')
            print_grid(hidden_grid, solved)
            break
        col = int(input('What column do you want to change : ')) - 1
        index = (row, col)
        if hidden_grid[index] != 0:
            print('Immutable number !')
            continue
        num = int(input('Enter the number to put : '))
        showed_grid[index] = num
    if general.check_grid(showed_grid):
        print('Well done !')
        print_grid(hidden_grid, showed_grid)
        print('This is what the computer found :')
        print_grid(hidden_grid, solved)
if __name__ == "__main__":
    main()
| [
"azuiops.tv@gmail.com"
] | azuiops.tv@gmail.com |
1cf3258065e5e16f983f1864dafea93785bb628e | d1cbaf70e1949eb11d5b10cd7a10d3859599bd41 | /lib/roi_data_layer/roibatchLoader.py | 3677aa16e144989d048b26d6041e5985aaa9020d | [
"MIT"
] | permissive | benedictflorance/da-faster-rcnn-PyTorch | 4350190d318923eb7cf08b233ad1e3c09dc4ba17 | c0058937dd1370cedc777b01d36343bf30fe1bad | refs/heads/master | 2021-07-03T15:04:26.484516 | 2020-10-23T08:00:55 | 2020-10-23T08:00:55 | 189,517,854 | 0 | 0 | NOASSERTION | 2019-05-31T03:00:00 | 2019-05-31T03:00:00 | null | UTF-8 | Python | false | false | 9,091 | py |
"""The data layer used during training to train a Fast R-CNN network.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import torch
from model.utils.config import cfg
from roi_data_layer.minibatch import get_minibatch, get_minibatch
from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
import numpy as np
import random
import time
import pdb
class roibatchLoader(data.Dataset):
    """Dataset yielding one (image, im_info, gt_boxes, num_boxes) sample per index
    for Faster R-CNN training/testing.

    During training, every image in a minibatch is cropped and/or zero-padded to
    a common aspect ratio (precomputed per batch in ``ratio_list_batch``) so the
    images can be stacked into a single tensor.  Ratios are width/height: a
    ratio < 1 means the image is taller than wide (see the crop branches in
    ``__getitem__``).
    """

    def __init__(self, roidb, ratio_list, ratio_index, batch_size, num_classes, training=True, normalize=None):
        # roidb: per-image annotation dicts (each carries a 'need_crop' flag).
        # ratio_list / ratio_index: aspect ratios and the matching roidb indices
        # -- assumed sorted ascending by ratio so each batch is a contiguous
        # ratio range (TODO confirm the caller sorts them).
        self._roidb = roidb
        self._num_classes = num_classes
        # we make the height of image consistent to trim_height, trim_width
        self.trim_height = cfg.TRAIN.TRIM_HEIGHT
        self.trim_width = cfg.TRAIN.TRIM_WIDTH
        self.max_num_box = cfg.MAX_NUM_GT_BOXES
        self.training = training
        self.normalize = normalize  # NOTE(review): stored but never applied in this class
        self.ratio_list = ratio_list
        self.ratio_index = ratio_index
        self.batch_size = batch_size
        self.data_size = len(self.ratio_list)

        # given the ratio_list, we want to make the ratio same for each batch.
        self.ratio_list_batch = torch.Tensor(self.data_size).zero_()
        num_batch = int(np.ceil(len(ratio_index) / batch_size))
        for i in range(num_batch):
            left_idx = i*batch_size
            right_idx = min((i+1)*batch_size-1, self.data_size-1)

            if ratio_list[right_idx] < 1:
                # for ratio < 1, we preserve the leftmost in each batch.
                target_ratio = ratio_list[left_idx]
            elif ratio_list[left_idx] > 1:
                # for ratio > 1, we preserve the rightmost in each batch.
                target_ratio = ratio_list[right_idx]
            else:
                # for ratio cross 1, we make it to be 1.
                target_ratio = 1

            # trainset ratio list, each batch is same number
            self.ratio_list_batch[left_idx:(right_idx+1)] = target_ratio

    def __getitem__(self, index):  # only one sample
        # During training, `index` walks the ratio-sorted order, so map it back
        # to the underlying roidb index; at test time indices are used as-is.
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db, self._num_classes)
        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            # gt_boxes columns are indexed as x1=0, y1=1, x2=2, y2=3 below;
            # column 4 presumably holds the class label -- verify in minibatch.
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            ########################################################
            # padding the input image to fixed size for each group #
            ########################################################

            # NOTE1: need to cope with the case where a group cover both conditions. (done)
            # NOTE2: need to consider the situation for the tail samples. (no worry)
            # NOTE3: need to implement a parallel data loader. (no worry)
            # get the index range

            # if the image need to crop, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:,1]))
                    max_y = int(torch.max(gt_boxes[:,3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            # Crop window can contain every box: pick a random
                            # start offset that keeps [min_y, max_y] inside it.
                            y_s_min = max(max_y-trim_size, 0)
                            y_s_max = min(min_y, data_height-trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            # Boxes span more than the window: start somewhere
                            # inside the box region.
                            y_s_add = int((box_region-trim_size)/2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(range(min_y, min_y+y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordiante of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according the trip
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:,0]))
                    max_x = int(torch.max(gt_boxes[:,2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            # Crop window can contain every box: random start
                            # that keeps [min_x, max_x] inside it.
                            x_s_min = max(max_x-trim_size, 0)
                            x_s_max = min(min_x, data_width-trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region-trim_size)/2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(range(min_x, min_x+x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordiante of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)

                    # update gt bounding box according the trip
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                # NOTE(review): this floor-based trim_size is unused; the
                # allocation below recomputes the height with np.ceil.
                trim_size = int(np.floor(data_width / ratio))

                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                 data_width, 3).zero_()

                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
                # print("height %d %d \n" %(index, anchor_idx))
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image need to crop.
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                # NOTE(review): the freshly allocated zero tensor is immediately
                # rebound away on the next line -- the slice assignment form
                # (padding_data[...] = ...) was probably intended; confirm.
                padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                # gt_boxes.clamp_(0, trim_size)
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box:
            # drop boxes degenerated to zero width or height by the crop/clamp.
            not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
            keep = torch.nonzero(not_keep == 0).view(-1)

            # Fixed-size box tensor so samples can be batched together.
            gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

            # permute trim_data to adapt to downstream processing (HWC -> CHW)
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, im_info, gt_boxes_padding, num_boxes
        else:
            # Test path: no cropping/padding; emit placeholder boxes.
            data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
            im_info = im_info.view(3)

            gt_boxes = torch.FloatTensor([1,1,1,1,1])
            num_boxes = 0

            return data, im_info, gt_boxes, num_boxes

    def __len__(self):
        # One sample per annotated image.
        return len(self._roidb)
| [
"noreply@github.com"
] | noreply@github.com |
b1f3c80000cc7153cb54b0c8ff458c6fbfb15d64 | 235c1afc67f837779d3ab4f83e75e7ff3b39c7c4 | /src/etf_ohlcv_daily/utils.py | be8737bb9f58e0c8063da0e400cc8723a2c72c67 | [
"MIT"
] | permissive | phaesoo/nesta-jobs | b8782be138bb9bd521a06f23e9be3adbd9443671 | 230961b5b9fa35f9a2a08e7519f25ec8841d7513 | refs/heads/master | 2022-12-21T14:31:59.217458 | 2020-05-24T04:25:54 | 2020-05-24T04:25:54 | 250,958,145 | 0 | 0 | MIT | 2022-12-08T09:54:47 | 2020-03-29T05:07:32 | Python | UTF-8 | Python | false | false | 678 | py | from datetime import datetime
from pandas import DataFrame
import pandas_datareader as pdr
def get_ohlcv(ticker: str, start: datetime = None, end: datetime = None) -> DataFrame:
    """Fetch daily OHLCV data for ``ticker`` from Yahoo Finance.

    Columns are renamed to snake_case, a simple daily ``return`` column is
    derived from the adjusted close, and the index is optionally trimmed to
    the [start, end] date range (inclusive on both ends).

    Args:
        ticker: Yahoo Finance ticker symbol.
        start: earliest date to keep (None keeps full history).
        end: latest date to keep (None keeps full history).

    Returns:
        DataFrame indexed by date with columns high/low/open/close/volume/
        adj_close/return.

    Raises:
        TypeError: if ``ticker`` is not a string.
    """
    # Raise instead of assert: asserts are stripped under `python -O`, which
    # would silently skip this validation.
    if not isinstance(ticker, str):
        raise TypeError("ticker must be a str, got %r" % type(ticker))
    df = pdr.DataReader(ticker, "yahoo")
    df = df.rename(
        columns={"High": "high", "Low": "low", "Open": "open",
                 "Close": "close", "Volume": "volume", "Adj Close": "adj_close"}
    )
    # Daily simple return: (P_t - P_{t-1}) / P_{t-1} on the adjusted close.
    adj_close_s = df.adj_close
    df["return"] = adj_close_s.diff() / adj_close_s.shift(1)
    # Trim the index to the requested window.  NOTE: start/end are not
    # forwarded to DataReader, so the full history is still downloaded first.
    if start:
        df = df[df.index >= start]
    if end:
        df = df[df.index <= end]
    return df
| [
"phaesoo@gmail.com"
] | phaesoo@gmail.com |
315ce8c930def547b1f04621e45ad7fd3bdae3f4 | 5e033119a827297aff391936660b7bd10bee7fd6 | /backend/geolocation_portal/category_sparetime/migrations/0002_openinghourssportcentreentry.py | b2bd828e87c2647a3c778a9f3c34afa078d58c04 | [] | no_license | kela4/DHBW-Softwareprojekt | 7b557f04927ac32a68b00ac33040221787c789db | 7c5178eb39a18bf7a28d5b6ee9570889b09f3dae | refs/heads/master | 2022-12-16T03:17:27.315129 | 2020-02-16T14:39:25 | 2020-02-16T14:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # Generated by Django 3.0 on 2020-01-13 19:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for category_sparetime.

    Creates OpeningHoursSportcentreEntry as a multi-table-inheritance child
    of geomodels.OpeningHours (the parent_link OneToOne below) with an
    optional, deletion-PROTECTed one-to-one link to a SportscentreEntry.
    """

    dependencies = [
        ('geomodels', '0011_openinghours'),
        ('category_sparetime', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='OpeningHoursSportcentreEntry',
            fields=[
                # Implicit MTI parent pointer doubling as the primary key.
                ('openinghours_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='geomodels.OpeningHours')),
                # PROTECT: the sports-centre entry cannot be deleted while
                # opening hours still reference it.
                ('sportcentre_entry', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='category_sparetime.SportscentreEntry')),
            ],
            bases=('geomodels.openinghours',),
        ),
    ]
| [
"kath.ehrmann@web.de"
] | kath.ehrmann@web.de |
9f90ad6871dc92f935efaae550a6d2713f3e71dc | 657b9c271bd885e80c0d0bcab10dc7d191276972 | /setup.py | f134e4d921cbeb452c6b1d703f4a5b01ef1a885e | [
"BSD-3-Clause"
] | permissive | jscottcronin/collie | 0fa5fd66e3feaeb2038ea58aa635ebe5ec0da68d | 0d62ae1e0194a64b7f3841b40ea56a573ba95268 | refs/heads/main | 2023-07-15T04:52:45.560176 | 2021-07-15T14:47:36 | 2021-07-15T14:47:36 | 392,405,991 | 0 | 0 | BSD-3-Clause | 2021-08-03T17:50:39 | 2021-08-03T17:50:38 | null | UTF-8 | Python | false | false | 2,435 | py | from setuptools import find_packages, setup
with open('collie/_version.py') as version_file:
    # Execute _version.py so __version__ is available here without importing
    # the (possibly not-yet-installed) collie package.
    exec(version_file.read())

with open('README.md') as r:
    readme = r.read()

with open('LICENSE') as l:
    # NOTE(review): `license` shadows the builtin of the same name; harmless
    # in a setup script, but worth renaming.
    license = l.read()

setup(
    name='collie',
    version=__version__,  # defined by the exec() above
    description='A PyTorch library for preparing, training, and evaluating deep learning hybrid recommender systems.',
    long_description=readme+'\n\n\nLicense\n-------\n'+license,
    long_description_content_type='text/markdown',
    author='Nathan Jones',
    url='https://github.com/ShopRunner/collie',
    download_url='https://github.com/ShopRunner/collie',
    license='BSD-3-Clause',
    data_files=[('', ['LICENSE'])],
    packages=find_packages(exclude=('tests', 'docs')),
    keywords=['deep learning', 'pytorch', 'recommender'],
    python_requires='>=3.6',
    install_requires=[
        'docstring_parser',
        'fire',
        'joblib',
        'numpy',
        'pandas',
        'pytorch-lightning>=1.0.0',  # ``collie`` library uses newer ``pytorch_lightning`` APIs
        'scikit-learn',
        'tables',
        'torch',
        'torchmetrics',
        'tqdm',
    ],
    extras_require={
        # Development-only tooling: linting, docs, notebooks, tests.
        'dev': [
            'flake8',
            'flake8-docstrings',
            'flake8-import-order',
            'ipython',
            'ipywidgets',
            'jupyterlab>=3.0.0',
            'matplotlib',
            'm2r2',
            'pip-tools',
            'pydocstyle<4.0.0',
            'pytest',
            'pytest-cov<3.0.0',
            'sphinx-copybutton',
            'sphinx-rtd-theme==0.5.2',
            'widgetsnbextension',
        ]
    },
    project_urls={
        'Documentation': 'https://collie.readthedocs.io/',
        'Source Code': 'https://github.com/ShopRunner/collie',
    },
    classifiers=[
        'Environment :: Console',
        'Environment :: GPU',
        'Natural Language :: English',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| [
"nathancooperjones@gmail.com"
] | nathancooperjones@gmail.com |
a33bdfad91b4225f463c646065d99aacc6792b36 | f0296b8a296756b7f8bcfb888702434afbb183ea | /dt-learn.py | 127043ce1b86c6b0c885f9ffef77b4d9faf97c2b | [] | no_license | mohandk/decision_trees | 1f51a90e20b15af6d2369ebba9774d88734c5b03 | 2712605a7b23d54d13ff5cc8099f9fcee99ffc25 | refs/heads/master | 2020-04-13T16:10:07.076588 | 2018-12-16T20:31:16 | 2018-12-16T20:31:16 | 163,314,353 | 0 | 0 | null | 2018-12-27T16:17:54 | 2018-12-27T16:17:54 | null | UTF-8 | Python | false | false | 19,841 | py | # MOHAN RAO DIVATE KODANDARAMA
# divatekodand@wisc.edu
# CS USERID: divate-kodanda-rama
# For Python 2 / 3 compatability
from __future__ import print_function
import sys
from scipy.io import arff
from io import StringIO
import numpy as np
import math
#import matplotlib.pyplot as plt
class Node:
    """Internal decision-tree node.

    Holds the chosen split (feature index plus, for numeric features, the
    threshold in `value`), the subtrees in `childrens`, the feature indices
    used on the path so far, and the class counts of the training examples
    that reached this node.
    """

    def __init__(self, feature_no, value, childrens, features_used, num_pos, num_neg):
        self.feature_no = feature_no
        self.value = value
        self.childrens = childrens
        self.features_used = features_used
        self.num_pos = num_pos
        self.num_neg = num_neg

    def compare(self, example):
        """Route `example` through this node's test.

        Numeric feature: True selects the "<= threshold" branch.
        Nominal feature: returns the child index of the matching declared
        value (or the number of declared values when nothing matches).
        Reads the module-level attribute metadata populated by main().
        """
        observed = example[self.feature_no]
        if train_attribute_types[self.feature_no] == 'numeric':
            return observed <= self.value
        position = 0
        for declared in train_attribute_ranges[self.feature_no][1]:
            if declared == observed:
                break
            position += 1
        return position
class Leaf:
    """Terminal decision-tree node.

    Carries the predicted class `label`, the feature indices consumed on the
    path to this leaf, and the positive/negative counts of the training
    examples that landed here.
    """

    def __init__(self, label, features_used, num_pos, num_neg):
        self.label = label
        self.num_pos = num_pos
        self.num_neg = num_neg
        self.features_used = features_used
class Split:
    """Candidate split description.

    `threshold` is a numeric cut point when `feature_type` is 'numeric'; for
    nominal features only the feature index matters and `threshold` carries
    the placeholder string 'Nominal'.
    """

    def __init__(self, feature_no, feature_type, threshold):
        self.threshold = threshold
        self.feature_type = feature_type
        self.feature_no = feature_no
#CODE FOR Q2 AND Q3
# def find_accuracy(tree, test_set):
# num_correct = 0
# num_incorrect = 0
# predictions = predict(test_set, tree)
# for i in range(test_set.size):
# #print(str(i + 1) + ": Actual: " + test_set[i][-1].decode('UTF-8') + " Predicted: " + str(predictions[i]))
# if (test_set[i][-1].decode('UTF-8') == predictions[i]):
# num_correct += 1
# else:
# num_incorrect += 1
# # print("Number of correctly classified: " + str(num_correct) + " Total number of test instances: " + str(test_set.size)) #,end="")
# t_accuracy = (num_correct / (test_set.size * 1))
# return t_accuracy
#
# def hw2_q2(dataset, testset):
# '''
# Generates plot for hw2 q2
# :param dataset:
# :return:
# '''
# # print(type(dataset))
# # print(type(dataset[1]))
# # print(dataset[1])
# n_dataset = len(dataset)
# global split_treshold, tree_str
# num_samples = 10
# data_sizes = [5,10,20,50,100]
# acc = []
# for val in data_sizes:
# n_train = int(round(n_dataset * (val / 100.0)))
# # print(n_train)
# accuracy = []
# for i in range(num_samples):
# if i > 0 and val == data_sizes[-1]:
# accuracy.append(accuracy_sample)
# continue
# p_dataset = np.random.choice(dataset, size=n_train, replace=False)
# # print(p_dataset)
# # print(type(p_dataset))
# f_used = []
# num_pos = 0
# for example in p_dataset:
# # print(type(example))
# # print(example)
# if example[-1].decode('UTF-8') == 'positive':
# num_pos += 1
# if num_pos >= (n_train - num_pos):
# label = 'positive'
# else:
# label = 'negative'
# tree = build_tree(p_dataset, f_used, label)
# # tree_str = ""
# # print_tree(tree)
# # print(tree_str)
# accuracy_sample = find_accuracy(tree, testset)
# accuracy.append(accuracy_sample)
# acc.append(accuracy)
# assert(len(acc) == len(data_sizes))
# np_acc = np.array(acc)
# np_acc = np_acc.T
# print(np_acc)
# mins = np_acc.min(0)
# maxes = np_acc.max(0)
# means = np_acc.mean(0)
# std = np_acc.std(0)
# # plt.errorbar(data_sizes, means, [means - mins, maxes - means], fmt='.k', ecolor='gray', lw=1, capsize=10)
# # plt.xlim(0, 105)
# # plt.ylim(0, 1)
# # plt.show()
# # input("Press Enter to continue...")
def is_stop_criteria_met(dataset, features_used, best_gain):
    """Decide whether tree growth should stop at the current node.

    Stops when: (a) every example shares one class label, (b) fewer examples
    remain than the command-line instance threshold, (c) the best candidate
    split offers no positive information gain, or (d) every feature has been
    used on this path (only meaningful when all features are nominal, since
    numeric features may be reused with new thresholds).

    Reads the module globals `split_treshold`, `num_features`, `only_nominal`
    set up in main().
    """
    global split_treshold
    global num_features
    global only_nominal
    labels = set(example[-1] for example in dataset)
    if len(labels) < 2:
        # Node is pure -- all examples belong to the same class.
        return True
    if len(dataset) < split_treshold:
        # Fewer examples than the threshold specified on the command line.
        return True
    # BUG FIX: find_best_split starts its search at 0 and never returns a
    # negative gain, so the old `best_gain < 0` test could never fire.  Stop
    # when the best split yields no positive gain, as originally intended.
    if best_gain <= 0:
        return True
    if len(features_used) == num_features and only_nominal:
        # All (nominal) features already consumed on this path.
        return True
    return False
def find_entropy(dataset):
    """Return the base-2 Shannon entropy of the class label over `dataset`.

    Each example's label is its last field; the positive fraction counts
    entries equal to b'positive', mirroring the binary positive/negative
    byte-string encoding produced by scipy's arff loader.
    """
    total = len(dataset)
    positives = sum(1 for example in dataset if example[-1] == b'positive')
    p = positives / float(total)
    if p == 0.0 or p == 1.0:
        # A pure node carries no uncertainty.
        return 0
    return -(p * math.log(p, 2) + (1 - p) * math.log(1 - p, 2))
def partition(dataset, split):
    """Divide `dataset` into child subsets according to `split`.

    Numeric feature: two partitions -- examples with value <= threshold
    first, the rest second.  Nominal feature: one partition per declared
    value of the attribute, in the order given by the module-level
    `train_attribute_ranges` table.
    """
    if split.feature_type == 'numeric':
        below, above = [], []
        for example in dataset:
            if example[split.feature_no] <= split.threshold:
                below.append(example)
            else:
                above.append(example)
        return [below, above]
    # Nominal: bucket by declared value (arff values arrive as bytes).
    declared_values = train_attribute_ranges[split.feature_no][1]
    buckets = []
    for declared in declared_values:
        buckets.append([example for example in dataset
                        if example[split.feature_no].decode('UTF-8') == declared])
    return buckets
def compute_info_gain(data_partitions, current_entropy):
    """Return the information gain of a candidate split.

    Gain = parent entropy minus the size-weighted average entropy of the
    child partitions.  Empty partitions contribute zero entropy (and are
    skipped, which also avoids a 0/0 when every partition is empty).
    Removed the unused pos_count/neg_count/n_partitions locals.
    """
    total = sum(len(part) for part in data_partitions)
    info_gain = current_entropy
    for part in data_partitions:
        size = len(part)
        if size:
            info_gain -= (size / float(total)) * find_entropy(part)
    return info_gain
def find_best_split(dataset, features_used):
    """Search every feature (and, for numeric features, every candidate
    threshold) for the split with the highest information gain.

    `features_used` holds the *indices* of nominal features already consumed
    on the path from the root; those features are skipped.  Numeric features
    may be reused with different thresholds.

    Returns (max_gain, Split).  When no candidate improves on zero gain, the
    returned Split has feature_type None and max_gain is 0.
    """
    global num_features
    global train_attribute_types
    max_gain = 0
    best_feature_no = 0
    best_feature_type = None
    best_feature_threshold = None
    current_entropy = find_entropy(dataset)
    for feature_no in range(num_features):
        if train_attribute_types[feature_no] == 'numeric':
            unique_vals = sorted(set(data[feature_no] for data in dataset))
            # Candidate thresholds are midpoints between consecutive values.
            for i in range(len(unique_vals) - 1):
                mid = (unique_vals[i] + unique_vals[i + 1]) / 2.0
                split = Split(feature_no, 'numeric', mid)
                data_partitions = partition(dataset, split)
                if len(data_partitions[0]) == 0 or len(data_partitions[1]) == 0:
                    # Degenerate cut: every example on one side.
                    continue
                info_gain = compute_info_gain(data_partitions, current_entropy)
                if info_gain > max_gain:
                    max_gain = info_gain
                    best_feature_no = feature_no
                    best_feature_type = 'numeric'
                    best_feature_threshold = mid
        else:
            # BUG FIX: features_used stores feature *indices* (build_tree
            # records best_split.feature_no), so membership must be tested
            # against the index.  The old name-vs-index comparison never
            # matched, letting nominal features be reused indefinitely.
            if feature_no in features_used:
                continue
            split = Split(feature_no, 'nominal', 'Nominal')
            data_partitions = partition(dataset, split)
            info_gain = compute_info_gain(data_partitions, current_entropy)
            if info_gain > max_gain:
                max_gain = info_gain
                best_feature_no = feature_no
                best_feature_type = 'nominal'
                best_feature_threshold = 'Nominal'
    best_split = Split(best_feature_no, best_feature_type, best_feature_threshold)
    return max_gain, best_split


def build_tree(data, features_used, parent_label):
    """Recursively grow the decision tree and return its root.

    `parent_label` is the majority label of the parent node, used both for
    empty partitions and to break 50/50 ties.  Returns a Node for internal
    splits or a Leaf when a stopping criterion fires.
    """
    total_examples = len(data)
    if total_examples == 0:
        # Empty partition: predict the parent's majority label.
        return Leaf(parent_label, features_used, 0, 0)
    best_gain, best_split = find_best_split(data, features_used)
    # Majority label at this node (arff labels are byte strings).
    num_pos = 0
    for example in data:
        if example[-1].decode('UTF-8') == 'positive':
            num_pos += 1
    if num_pos > (total_examples - num_pos):
        label = 'positive'
    elif num_pos < (total_examples - num_pos):
        label = 'negative'
    else:
        label = parent_label  # exact tie -> inherit the parent's label
    if is_stop_criteria_met(data, features_used, best_gain):
        return Leaf(label, features_used, num_pos, (total_examples - num_pos))
    # BUG FIX: extend a *copy* of features_used for the subtree instead of
    # appending to the shared list -- the in-place append leaked every
    # feature chosen anywhere in the traversal into all sibling branches.
    child_features = features_used + [best_split.feature_no]
    childrens = []
    for data_p in partition(data, best_split):
        childrens.append(build_tree(data_p, child_features, label))
    return Node(best_split.feature_no, best_split.threshold, childrens,
                child_features, num_pos, (total_examples - num_pos))
def print_tree(node, spacing=""):
    """Render the tree into the module-level `tree_str` buffer.

    Each internal node appends one line per child: the attribute test
    ("attr <= thr" / "attr > thr" for numeric, "attr = value" for nominal),
    the child's "[negative positive]" class counts, and -- when that child
    is a leaf -- ": label".  `spacing` accumulates one "|" plus a tab per
    depth level.  Leaves themselves print nothing; their label is appended
    to the parent's line.
    """
    global train_attributes
    global train_attribute_ranges
    global tree_str
    # Base case: a leaf's label was already appended by its parent below.
    if isinstance(node, Leaf):
        return
    i = 0
    for child in node.childrens:
        if(train_attribute_ranges[node.feature_no][0] == 'numeric'):
            if(i == 0):
                # First child of a numeric node: the "<=" branch.
                dist = "[" + str(child.num_neg) + " " + str(child.num_pos) + "]"
                tree_str = tree_str + "\n" + spacing + train_attributes[node.feature_no] + " <= " + "%.6f" % node.value + " " + dist
                if isinstance(node.childrens[0], Leaf):
                    tree_str = tree_str + ": " + node.childrens[0].label
            else:
                # Second child of a numeric node: the ">" branch.
                dist = "[" + str(child.num_neg) + " " + str(child.num_pos) + "]"
                tree_str = tree_str + "\n" + spacing + train_attributes[node.feature_no] + " > " + "%.6f" % node.value + " " + dist
                if isinstance(node.childrens[1], Leaf):
                    tree_str = tree_str + ": " + node.childrens[1].label
        else:
            # Nominal node: one "=" line per declared attribute value.
            dist = "[" + str(child.num_neg) + " " + str(child.num_pos) + "]"
            tree_str = tree_str + "\n" + spacing + train_attributes[node.feature_no] + " = " + train_attribute_ranges[node.feature_no][-1][i] + " " + dist
            if isinstance(node.childrens[i], Leaf):
                tree_str = tree_str + ": " + node.childrens[i].label
        i += 1
        # Recurse one level deeper.  The trailing comma is harmless: it only
        # wraps the None return in a throwaway tuple.
        print_tree(child, spacing + "|\t"),
def predict(dataset, tree):
    """Classify every example in `dataset` by walking the fitted tree.

    Numeric nodes branch left (child 0) when the example's value is at most
    the node threshold, right (child 1) otherwise; nominal nodes follow the
    child whose declared value matches the example's (byte-string) value.
    Returns one predicted label string per example.
    """
    predictions = []
    for example in dataset:
        current = tree
        while not isinstance(current, Leaf):
            if train_attribute_types[current.feature_no] == 'nominal':
                # Follow the branch of the matching declared value.
                branch = 0
                for declared in train_attribute_ranges[current.feature_no][-1]:
                    if example[current.feature_no].decode('UTF-8') == declared:
                        current = current.childrens[branch]
                        break
                    branch += 1
            else:
                if example[current.feature_no] <= current.value:
                    current = current.childrens[0]
                else:
                    current = current.childrens[1]
        predictions.append(current.label)
    return predictions
def main():
    """Entry point: parse CLI args, train the tree, print it, and report
    accuracy on the test set.

    Usage: python dt-learn.py <train.arff> <test.arff> <instance-threshold>

    Publishes the training metadata (attribute names/types/ranges, instance
    threshold, etc.) through module globals read by the tree helpers.
    """
    if len(sys.argv) < 4:
        print("Wrong Usage - Script takes 3 arguments")
        print("Example Usage- python dt-learn.py heart_train.arff heart_test.arff 2")
        sys.exit(0)  # sys.exit: works even when the site `exit` helper is absent
    train_set_filename = sys.argv[1]
    test_set_filename = sys.argv[2]
    global split_treshold
    split_treshold = int(sys.argv[3])

    # Load the data sets; `with` guarantees the handles are closed.
    global train_attributes
    global train_attribute_types
    global num_examples
    global num_features
    global train_attribute_ranges
    with open(train_set_filename, 'r') as train_set_file:
        train_set, train_meta = arff.loadarff(train_set_file)
    with open(test_set_filename, 'r') as test_set_file:
        test_set, test_meta = arff.loadarff(test_set_file)

    num_examples = len(train_set)
    num_features = len(train_set[1]) - 1  # last field is the class label
    train_attributes = train_meta.names()
    train_attribute_types = train_meta.types()

    # Only-nominal datasets get an extra stopping criterion (each nominal
    # feature is usable at most once per path).
    global only_nominal
    # BUG FIX: the old check (`2 != len(set(...))`) also flagged an
    # all-numeric dataset as "only nominal"; test for the absence of
    # numeric features instead.
    only_nominal = 'numeric' not in set(train_attribute_types)

    # (type, values) metadata per attribute, indexed by feature number.
    train_attribute_ranges = []
    for name in train_attributes:
        train_attribute_ranges.append(train_meta.__getitem__(name))

    # Majority label of the full training set seeds the root's tie-breaker.
    total_examples = len(train_set)
    num_pos = 0
    for example in train_set:
        # BUG FIX: loadarff yields byte strings, so decode before comparing
        # (the old str comparison never matched and the seed label was
        # always 'negative'); this matches the decode in build_tree.
        if example[-1].decode('UTF-8') == 'positive':
            num_pos += 1
    if num_pos >= (total_examples - num_pos):
        label = 'positive'
    else:
        label = 'negative'

    features_used = []
    tree = build_tree(train_set, features_used, label)

    # Render the tree into the module-level buffer and print it.
    global tree_str
    tree_str = ""
    print_tree(tree)
    tree_str = tree_str[1:]  # drop the leading newline added by print_tree
    print(tree_str)

    # Evaluate on the test set.
    num_correct = 0
    num_incorrect = 0
    predictions = predict(test_set, tree)
    print("<Predictions for the Test Set Instances>")
    for i in range(test_set.size):
        actual = test_set[i][-1].decode('UTF-8')
        print(str(i + 1) + ": Actual: " + actual + " Predicted: " + str(predictions[i]))
        if actual == predictions[i]:
            num_correct += 1
        else:
            num_incorrect += 1
    print("Number of correctly classified: " + str(num_correct) +
          " Total number of test instances: " + str(test_set.size))


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
a040a4045a98a04290859770134342c8c2fb5428 | 938e644b819d5411709b9f24ad66ecea6539fe08 | /tencent/tencent/pipelines.py | d59576a52dc7fd1be375ff00b87e76cf3f619621 | [] | no_license | ryanzicky/1 | 7b7d0380bf27b80691f60c93161f5f9d6a75fcea | 777e6ffc65c19f07a4e128940b81742a93a2982c | refs/heads/master | 2020-12-28T17:00:58.849208 | 2020-02-05T09:39:04 | 2020-02-05T09:39:04 | 238,415,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
class TencentPipeline(object):
    """Scrapy item pipeline that logs each scraped item and passes it on."""

    def process_item(self, item, spider):
        # Emit the item at WARNING level (visible under default log settings).
        logging.warning(item)
        # Returning the item keeps it flowing to any later pipelines.
        return item
| [
"zhourui@tuhu.cn"
] | zhourui@tuhu.cn |
9e703e905db62e8552e57941362b9d9bdae8b919 | 0cc98111e4ef7f747e2d3bc207dc53a9097dce68 | /CodeHs/2.Conditionals/1.Booleans/4.1.4 Fix This Program.py | a6f914d66a37fd530a77f2f41a74068745f1ee63 | [] | no_license | tw-alexander/CodeHS-Intro_To_Computer_Science-Answers-Python | 4bb3587feb69be3ef7817b4fbe778352443c7072 | 4b67950cf33684773ce78817dc10986858469a68 | refs/heads/master | 2022-02-22T01:23:23.147279 | 2019-09-21T23:36:11 | 2019-09-21T23:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | brought_food = True
brought_drink = False

# These lines don't work! Fix them so that they do.
# Python 2: `print` is a statement; the booleans must be converted with
# str() before string concatenation.
print "Did the person bring food? " + str(brought_food)
print "Did the person bring a drink? " + str(brought_drink)
| [
"noreply@github.com"
] | noreply@github.com |
599f4edbf8bbbcf5be1ba76d41791b9964071018 | 35a6f5a26ea97ebed8ab34619a8eec51719d2cc0 | /Python_Basic/17 文件操作/5 seek函数.py | 115eb71e6b1003cafcc78f9afeea357211ceaa76 | [] | no_license | PandaCoding2020/pythonProject | c3644eda22d993b3b866564384ed10441786e6c5 | 26f8a1e7fbe22bab7542d441014edb595da39625 | refs/heads/master | 2023-02-25T14:52:13.542434 | 2021-02-03T13:42:41 | 2021-02-03T13:42:41 | 331,318,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | """
语法:文件对象.seek(偏移量,超始位置) 0开头,1当前 2结尾
目标:
1.r改变读取文件指针:改变读取数据开始位置或把文件指针放结尾(无法读取数据)
2.a改变读取文件指针,做到可以读到数据
"""
# 1.1. Move the read position to a given offset from the start of the file:
# f.seek(2, 0)
# 1.2. Move the file pointer to the end (nothing is left to read from there):
# f.seek(0, 2)
# f = open('test.txt', 'r+')
# f.seek(2, 0)
# con = f.read()
# print(con)
#
# f.close()

# 2. In append mode ('a+') the pointer starts at the end of the file, so
# rewind it to the beginning to make the content readable.  Using `with`
# guarantees the file is closed even if reading fails.
with open('test.txt', 'a+') as f:
    f.seek(0, 0)
    con = f.read()
    print(con)
| [
"gzupanda@outlook.com"
] | gzupanda@outlook.com |
8b5018df8dddf952e39d64c225a5ffd173900976 | a6f4c776037bcb22bc72603dc7433dbb1f11e2b2 | /dblog/settings/common.py | 03e08af627f281a14694d0160c263922a0d66c64 | [
"MIT"
] | permissive | zxins/dblog | bccf14cedea7f4bf4a1e357eb60231704781130c | 8fe92cc6bec628c2e8869ab625e14b8c3da547f1 | refs/heads/master | 2022-12-13T08:16:35.331644 | 2020-12-01T03:49:59 | 2020-12-01T03:49:59 | 243,273,814 | 0 | 0 | MIT | 2022-12-08T03:53:35 | 2020-02-26T13:50:01 | CSS | UTF-8 | Python | false | false | 3,717 | py | """
Django settings for dblog project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&7jc^0yyonj2vs8$*rt6z*h8+3zdb%^=ta!)$df&zeqks@ikpg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '132.232.3.242']
# Application definition
INSTALLED_APPS = [
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pure_pagination', # 分页
'blog.apps.BlogConfig',
'comments.apps.CommentsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# django-pure-pagination 分页设置
PAGINATION_SETTINGS = {
'PAGE_RANGE_DISPLAYED': 4, # 分页条当前页前后应该显示的总页数(两边均匀分布,因此要设置为偶数),
'MARGIN_PAGES_DISPLAYED': 2, # 分页条开头和结尾显示的页数
'SHOW_FIRST_PAGE_WHEN_INVALID': True, # 当请求了不存在页,显示第一页
}
| [
"624042309@qq.com"
] | 624042309@qq.com |
f428c560237217ad3f5dd49edbabd5734a5b4eff | 0a679896fbe96a8a0a59ad9f4f55edb4aa044a93 | /Duplicate File Handler/task/handler.py | 040a40e81fc3f6eef361f3690d7a85ad20d01559 | [] | no_license | TogrulAga/Duplicate-File-Handler | 5b7bd9c9508ae3ee96751bc3e56ebaccc44c46f9 | 66fef381572c0e6697330463b0b720c2dbca82e6 | refs/heads/master | 2023-06-30T07:07:24.524591 | 2021-08-06T15:47:00 | 2021-08-06T15:47:00 | 393,424,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | import os
import argparse
import hashlib
class FileHandler:
def __init__(self, directory):
self.directory = directory
self.file_format = None
self.sorting_option = None
self.files_dict = dict()
self.dict_items = None
self.numbered_dict = dict()
self.get_format()
self.get_sorting_option()
self.walk_dir()
self.list_same_sized_files()
self.check_duplicates()
self.delete_files()
def get_format(self):
self.file_format = input("Enter file format:\n")
def get_sorting_option(self):
print("Size sorting options:")
print("1. Descending")
print("2. Ascending\n")
while True:
self.sorting_option = int(input("Enter a sorting option:\n"))
print()
if self.sorting_option not in (1, 2):
print("\nWrong option\n")
else:
break
def walk_dir(self):
for root, directories, filenames in os.walk(self.directory):
for file in filenames:
if self.file_format != "":
if self.file_format != os.path.splitext(file)[-1].split(".")[-1]:
continue
file_path = os.path.join(root, file)
file_size = os.path.getsize(file_path)
if file_size in self.files_dict.keys():
self.files_dict[file_size].append(file_path)
else:
self.files_dict[file_size] = [file_path]
def list_same_sized_files(self):
if self.sorting_option == 1:
dict_items = list(reversed(sorted(self.files_dict.items())))
elif self.sorting_option == 2:
dict_items = sorted(self.files_dict.items())
for size, files in dict_items:
print(f"{size} bytes")
for file in files:
print(file)
print()
self.dict_items = dict_items
def check_duplicates(self):
while True:
answer = input("Check for duplicates?\n")
if answer not in ("yes", "no"):
continue
else:
break
if answer == "no":
return
else:
n_duplicate = 1
for size, files in self.dict_items:
print(f"\n{size} bytes")
hash_dict = dict()
for file in files:
hash_maker = hashlib.md5()
with open(file, "rb") as f:
hash_maker.update(f.read())
if hash_maker.hexdigest() not in hash_dict.keys():
hash_dict[hash_maker.hexdigest()] = [file]
else:
hash_dict[hash_maker.hexdigest()].append(file)
for key, values in hash_dict.items():
if len(values) > 1:
print(f"Hash: {key}")
for value in values:
print(f"{n_duplicate}. {value}")
self.numbered_dict[n_duplicate] = value
n_duplicate += 1
def delete_files(self):
while True:
answer = input("Delete files?\n")
if answer not in ("yes", "no"):
continue
else:
break
if answer == "no":
return
else:
while True:
answer = input("Enter file numbers to delete:\n")
try:
files_to_delete = list(map(int, answer.split()))
if len(files_to_delete) == 0:
raise ValueError
if any(n not in self.numbered_dict.keys() for n in files_to_delete):
raise ValueError
break
except ValueError:
print("\nWrong format\n")
freed_space = 0
for file in files_to_delete:
freed_space += os.path.getsize(self.numbered_dict[file])
os.remove(self.numbered_dict[file])
print(f"Total freed up space: {freed_space} bytes")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("directory").required = False
args = parser.parse_args()
if args.directory is None:
print("Directory is not specified")
file_handler = FileHandler(args.directory)
| [
"toghrul.aghakishiyev@ericsson.com"
] | toghrul.aghakishiyev@ericsson.com |
ca0312e44c689d8a119737d9102edca66c6d0e32 | 757433be241afbff1c138d77daf13397f858aef3 | /scorpio/urls.py | 166247c53f8b21e7f1bf3184baad8bf10b8db329 | [
"MIT"
] | permissive | RockefellerArchiveCenter/scorpio | 1f9d152bb440bb98c007f652fa644602e3b8b483 | f308cac3880ba9008d3aadfdc66a4062d4d27492 | refs/heads/base | 2023-08-20T22:34:32.085492 | 2023-08-07T17:00:58 | 2023-08-07T17:00:58 | 215,400,734 | 0 | 1 | MIT | 2023-09-08T21:09:13 | 2019-10-15T21:33:10 | Python | UTF-8 | Python | false | false | 1,601 | py | """scorpio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from asterism.views import PingView
from django.contrib import admin
from django.urls import include, re_path
from rest_framework.schemas import get_schema_view
from indexer.views import (IndexAddView, IndexDeleteView, IndexResetView,
IndexRunViewSet)
from .routers import ScorpioRouter
router = ScorpioRouter()
router.register(r'index-runs', IndexRunViewSet, 'indexrun')
schema_view = get_schema_view(
title="Scorpio API",
description="Endpoints for Scorpio microservice application."
)
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^index/add/', IndexAddView.as_view(), name='index-add'),
re_path(r'^index/delete/', IndexDeleteView.as_view(), name='index-delete'),
re_path(r'^index/reset/', IndexResetView.as_view(), name='index-reset'),
re_path(r'^status/', PingView.as_view(), name='ping'),
re_path(r'^schema/', schema_view, name='schema'),
re_path(r'^', include(router.urls)),
]
| [
"helrond@hotmail.com"
] | helrond@hotmail.com |
972b7afb8148322afbfcf190ba1801302e12ae61 | b1b5d91bae771d77dd4c91f99427ec07e520bc9c | /test_sort.py | 3a2cd800f0d83b6672250f74f0cc85329a7b5e93 | [] | no_license | envhyf/pyWRFChemEmiss | 7ca6b6862ca3493656d8f471cca57abb27928c29 | 95cf0c7e391d6cbff4e8b04111edca4f06993a7f | refs/heads/master | 2021-01-18T08:04:29.514097 | 2014-06-18T05:37:39 | 2014-06-18T05:37:39 | 41,543,599 | 3 | 0 | null | 2015-08-28T11:22:12 | 2015-08-28T11:22:12 | null | UTF-8 | Python | false | false | 937 | py | from random import randint
class PointData():
def __init__(self, x, y, value):
self.x = x
self.y = y
self.value = value
def __str__(self):
return '{0}:{1}'.format(self.x, self.y)
def compare_pointdata(a, b):
if (a.x == b.x) and (a.y == b.y):
return 0
if (a.y < b.y):
return -1
if (a.y == b.y) and (a.x < b.x):
return -1
return 1
def compare_pointdata_f(a, b):
err = 0.0001
if (abs(a.x - b.x) <= err) and (abs(a.y - b.y) <= err):
return 0
if ((b.y - a.y) >= err):
return -1
if (abs(a.y - b.y) <= err) and ((b.x - a.x) >= err):
return -1
return 1
list_data = []
for i in range(100):
for j in range(100):
list_data.append(PointData(randint(1,100), randint(1,100), i + j))
list_data.sort(compare_pointdata_f)
for el in list_data:
print el
| [
"arif@sainsmograf.com"
] | arif@sainsmograf.com |
09a04492749d62dae35061868dd2df07b8bd633a | d12bf1138cc8173ceaa4b2d3c876610a1b9dd158 | /myvenv/bin/django-admin | 8b642c18f42e8a7c16615b913b36ccc4db25c373 | [] | no_license | Donsworkout/my-first-blog | aa1565a16b8e5bf3d2348b5c821b9a24e31a16a9 | d7b87d5a6b871c6e8f2fb99c695424c1f95f42f6 | refs/heads/master | 2021-04-15T17:42:57.158790 | 2018-03-25T06:50:34 | 2018-03-25T06:50:34 | 126,668,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | #!/Users/donsdev/python-workspace/speedblog/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"dickings@likelion.org"
] | dickings@likelion.org | |
1126f4f0e430b9f937a96a7b9cf597dd013a5a0d | aff0ac91f26ff94ac0180f5115839fbea57ab4bd | /primitives/line.py | 77ee019510b07442c18ccb29631c076e371a73f8 | [] | no_license | fizyk20/spacetime-graph | dc539e04cb984e14151b8e32096c76986def56db | f352ed34942c1944f7456d639390b41cbb885a75 | refs/heads/master | 2021-05-06T18:58:26.999095 | 2017-11-25T17:09:30 | 2017-11-25T17:09:30 | 111,997,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | from .generic import GenericObject
from OpenGL.GL import *
from math import sqrt
import numpy
from numpy import linalg
class Line(GenericObject):
def __init__(self, p1=(0,0), p2=(0,0), **kwargs):
super(Line, self).__init__(**kwargs)
if not isinstance(p1, tuple) or len(p1) != 2:
raise Exception
if not isinstance(p2, tuple) or len(p2) != 2:
raise Exception
self.p1 = numpy.array(p1, 'f')
self.p2 = numpy.array(p2, 'f')
def _draw(self, **kwargs):
glBegin(GL_LINES)
glVertex(self.p1[1], self.p1[0], 0.0)
glVertex(self.p2[1], self.p2[0], 0.0)
glEnd() | [
"fizyk20@gmail.com"
] | fizyk20@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.