Mirror of https://github.com/cooperhammond/irs.git

Fixed a bug with albums, made everything more readable, and added a console GUI.

Commit 29635300a1 (parent 7453a28ff8)
@@ -1,6 +1,33 @@
 #!/usr/bin python
 import argparse
+from os import system
+from sys import exit
 from .manage import *
 from .utils import *

+def console():
+    system("clear")
+    media = None
+    while type(media) is not int:
+        print (bc.HEADER)
+        print ("What type of media would you like to download?")
+        print ("\t1) Song\n\t2) Album")
+        try:
+            media = int(input(bc.YELLOW + bc.BOLD + ":: " + bc.ENDC))
+        except ValueError:
+            print (bc.FAIL + "\nPlease enter 1 or 2." + bc.ENDC)
+
+    print (bc.HEADER + "Artist of song/album ", end="")
+    artist = input(bc.BOLD + bc.YELLOW + ": " + bc.ENDC)
+
+    if media == 1:
+        print (bc.HEADER + "Song you would like to download ", end="")
+        song = input(bc.BOLD + bc.YELLOW + ": " + bc.ENDC)
+        rip_mp3(song, artist)
+    elif media == 2:
+        print (bc.HEADER + "Album you would like to download ", end="")
+        album = input(bc.BOLD + bc.YELLOW + ": " + bc.ENDC)
+        rip_album(album, artist)
+
 def main():
     parser = argparse.ArgumentParser()

@@ -15,10 +42,10 @@ def main():
     if args.artist and not (args.album or args.song):
         print ("usage: __init__.py [-h] [-a ARTIST] [-A ALBUM | -s SONG] \n\
 error: must specify -A/--album or -s/--song if specifying -a/--artist")
-        sys.exit(1)
+        exit(1)

     elif not args.artist:
-        console
+        console()

     elif args.artist:
         if args.album:
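The new console() function gives the package an interactive fallback: when main() runs without -a/--artist it now calls console() instead of the old bare (and ineffective) console reference. A minimal usage sketch, assuming the package is importable as irs and exposes console() and main() as shown above:

    # Sketch only -- assumes `irs` is the importable package name.
    from irs import console, main

    # Interactive path: prompts for media type (1 = song, 2 = album),
    # then artist and title, and dispatches to rip_mp3()/rip_album().
    console()

    # Flag-driven path, per the usage string above:
    #   -a ARTIST with either -s SONG or -A ALBUM.
    main()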
@@ -1,46 +1,68 @@
-import urllib.request, urllib.parse, re, sys, os, requests
+# Powered by:
 import youtube_dl
+
+# Info getting
+from urllib.request import urlopen
+from urllib.parse import urlencode
+
+# Info parsing
+from re import findall
+import os, json
 from bs4 import BeautifulSoup
+
+# Local utils
 from .utils import *
 from .metadata import *

 def find_mp3(song, artist):
+    print ('\n' + color(song, ["BOLD", "UNDERLINE"]) + ' by ' + color(artist, ["BOLD", "UNDERLINE"]) + '\n')
+
     search_terms = song + " " + artist
-    print ("\"%s\" by %s" % (song, artist))
-    query_string = urllib.parse.urlencode({"search_query" : (search_terms)})
-    html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
-    search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
+    query_string = urlencode({"search_query" : (search_terms)})
+
+    html_content = urlopen("http://www.youtube.com/results?" + query_string)
+    search_results = findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())

     in_title = False
     i = -1
     given_up_score = 0

     while in_title == False:
         i += 1
         given_up_score += 1

         if given_up_score >= 10:
             in_title = True

         audio_url = ("http://www.youtube.com/watch?v=" + search_results[i])
-        title = strip_special_chars((BeautifulSoup(urllib.request.urlopen(audio_url), 'html.parser')).title.string.lower())
+        title = strip_special_chars((BeautifulSoup(urlopen(audio_url), 'html.parser')).title.string.lower())
         song_title = song.lower().split("/")

         for song in song_title:
             if strip_special_chars(song) in strip_special_chars(title):
                 in_title = True

     return search_results[i]

-def rip_album(album, artist, tried=False, search="album"):
+def rip_album(album, artist, tried=False, search="album", album_url=None):
     visible_texts = search_google(album, artist, search)
     try:
         songs = []
         num = True

         for i, j in enumerate(visible_texts):
             if 'Songs' in j:
                 if visible_texts[i + 1] == "1":
                     indexed = i

         while num == True:
             try:

                 if type(int(visible_texts[indexed])) is int:
                     a = visible_texts[indexed + 1]
                     songs.append(a)
                     indexed += 1

             except:
                 indexed += 1
                 if indexed >= 1000:

@@ -48,21 +70,25 @@ def rip_album(album, artist, tried=False, search="album"):
                 else:
                     pass

+        if album_url != None:
+            album_url = get_albumart_url(album, artist)
+
         for i, j in enumerate(songs):
-            rip_mp3(j, artist, part_of_album=True, album=album, tracknum=i + 1)
+            rip_mp3(j, artist, part_of_album=True, album=album, tracknum=i + 1, album_url=album_url)

     except Exception as e:
         if str(e) == "local variable 'indexed' referenced before assignment" or str(e) == 'list index out of range':
             if tried != True:
-                print ("%s Trying to find album ..." % color('[*]','OKBLUE'))
+                print (bc.OKBLUE + "Trying to find album ..." + bc.ENDC)
                 rip_album(album, artist, tried=True, search="")
             else:
-                print ("%s Could not find album '%s'" % (color('[-]','FAIL'), album))
+                print (bc.FAIL + 'Could not find album "%s"' % album + bc.ENDC)
         else:
-            print ("%s There was an error with getting the contents \
-of the album '%s'" % (color('[-]','FAIL'), album))
+            print (bc.FAIL + 'There was an error getting the contents of "%s":' % album + bc.ENDC)
+            print (e)

-def rip_mp3(song, artist, part_of_album=False, album="", tracknum=""):
+def rip_mp3(song, artist, part_of_album=False, album="", tracknum="", album_url=None):
     audio_code = find_mp3(song, artist)
     filename = strip_special_chars(song) + ".mp3"
     ydl_opts = {

@@ -92,4 +118,7 @@ def rip_mp3(song, artist, part_of_album=False, album="", tracknum=""):
         if audio_code in file:
             os.rename(file, location + "/" + filename)

-    parse_metadata(song, artist, location, filename, tracknum=tracknum, album=album)
+    parse_metadata(song, artist, location, filename, tracknum=tracknum, album=album, album_url=album_url)
+
+    print (color(song, ["BOLD", "UNDERLINE"]) + bc.OKGREEN + '" downloaded successfully!'+ bc.ENDC)
+    print ("")
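rip_album() now threads an album_url keyword down through rip_mp3() to parse_metadata(), so the cover-art URL for an album only has to be resolved once rather than once per track. A minimal sketch of the two entry points, using hypothetical album and song names:

    # Sketch only -- hypothetical titles; functions as defined in this commit.
    from irs.manage import rip_album, rip_mp3

    # Album: the track list is scraped, then each track is ripped with the
    # shared album metadata (and, once resolved, the same album_url).
    rip_album("In Rainbows", "Radiohead")

    # Single song: no album_url is supplied, so parse_metadata() falls back
    # to get_albumart_url() on its own.
    rip_mp3("Karma Police", "Radiohead")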
irs/metadata.py

@@ -1,64 +1,84 @@
-import mutagen.id3, mutagen.easyid3, mutagen.mp3
-import urllib.request, urllib.parse
+# MP3 Metadata editing
+from mutagen.mp3 import MP3, EasyMP3
+from mutagen.easyid3 import EasyID3
+from mutagen.id3 import ID3, APIC
+
+# Info getting
+from urllib.parse import quote_plus, quote
+from urllib.request import urlopen, Request
+
+# Info parsing
+import json
+from re import match
 from bs4 import BeautifulSoup
 import requests

+# Local utils
 from .utils import *
-import re

 def search_google(song, artist, search_terms=""):

     def visible(element):
         if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
             return False
-        elif re.match('<!--.*-->', str(element)):
+        elif match('<!--.*-->', str(element)):
             return False
         return True

     string = "%s %s %s" % (song, artist, search_terms)
-    filename = 'http://www.google.com/search?q=' + urllib.parse.quote_plus(string)
+    filename = 'http://www.google.com/search?q=' + quote_plus(string)
     hdr = {
         'User-Agent':'Mozilla/5.0',
         'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     }
-    texts = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(filename, \
+
+    texts = BeautifulSoup(urlopen(Request(filename, \
         headers=hdr)).read(), 'html.parser').findAll(text=True)

     return list(filter(visible, texts))

-def parse_metadata(song, artist, location, filename, tracknum="", album=""):
+def parse_metadata(song, artist, location, filename, tracknum="", album="", album_url=None):
     googled = search_google(song, artist)
-    mp3file = mutagen.mp3.MP3("%s/%s" % (location, filename), ID3=mutagen.easyid3.EasyID3)
-    print ("%s Metadata parsing:" % color('[+]','OKBLUE'))
+
+    mp3file = MP3("%s/%s" % (location, filename), ID3=EasyID3)

     # Song title
     mp3file['title'] = song
     mp3file.save()
-    print ("\t%s Title parsed: " % color('[+]','OKGREEN') + mp3file['title'][0])
+
+    print("")
+    print (bc.OKGREEN + "Title parsed: " + bc.ENDC + mp3file['title'][0])

     # Artist
     mp3file['artist'] = artist
     mp3file.save()
-    print ("\t%s Artist parsed: " % color('[+]','OKGREEN') + mp3file['artist'][0])
+
+    print (bc.OKGREEN + "Artist parsed: " + bc.ENDC + mp3file['artist'][0])

     # Album
     if album == "":
         for i, j in enumerate(googled):
             if "Album:" in j:
                 album = (googled[i + 1])
-    try:
+    if album:
         mp3file['album'] = album
-        print ("\t%s Album parsed: " % color('[+]','OKGREEN') + mp3file['album'][0])
-    except Exception:
-        mp3file['album'] = album
-        print ("\t%s Album not parsed" % color('[-]','FAIL'))
+        print (bc.OKGREEN + "Album parsed: " + bc.ENDC + mp3file['album'][0])
+    else:
+        print (bc.FAIL + "Album not parsed.")

     mp3file.save()

     # Release date
     for i, j in enumerate(googled):
         if "Released:" in j:
             date = (googled[i + 1])

     try:
         mp3file['date'] = date
-        print ("\t%s Release date parsed" % color('[+]','OKGREEN'))
+        print (bc.OKGREEN + "Release date parsed: " + bc.ENDC + mp3file['date'][0])
     except Exception:
         mp3file['date'] = ""
         pass

     mp3file.save()

     # Track number

@@ -67,46 +87,54 @@ def parse_metadata(song, artist, location, filename, tracknum="", album=""):
     mp3file.save()

     # Album art
     if mp3file['album'][0] != "":
         try:
-            embed_mp3(get_albumart_url(album, artist), "%s/%s" % (location, filename))
-            print ("\t%s Album art parsed" % color('[+]','OKGREEN'))
-        except Exception as e:
-            print ("\t%s Album art not parsed" % color('[-]','FAIL'))
+            if album_url == None:
+                album_url = get_albumart_url(album, artist)
+                embed_mp3(album_url, location + "/" + filename)
+            else:
+                embed_mp3(album_url, location + "/" + filename)

-    print ("\n%s \"%s\" downloaded successfully!\n" % (color('[+]','OKGREEN'), song))
+            print (bc.OKGREEN + "Album art parsed: " + bc.ENDC + album_url)
+
+        except Exception as e:
+            print (e)
+            print (bc.FAIL + "Album art not parsed." + bc.ENDC)

 def embed_mp3(albumart_url, song_location):
-    img = urllib.request.urlopen(albumart_url)
-    audio = mutagen.mp3.EasyMP3(song_location, ID3=mutagen.id3.ID3)
+    image = urlopen(albumart_url)
+    audio = EasyMP3(song_location, ID3=ID3)

     try:
         audio.add_tags()
     except Exception as e:
         pass

     audio.tags.add(
-        mutagen.id3.APIC(
-            encoding = 3, # UTF-8
+        APIC(
+            encoding = 3,
             mime = 'image/png',
-            type = 3, # 3 is for album art
+            type = 3,
             desc = 'Cover',
-            data = img.read() # Reads and adds album art
+            data = image.read()
         )
     )
     audio.save()

 def get_albumart_url(album, artist):
-    try:
-        search = "%s %s" % (album, artist)
-        url = "http://www.seekacover.com/cd/" + urllib.parse.quote_plus(search)
-        soup = BeautifulSoup(requests.get(url).text, 'html.parser')
-        done = False
-        for img in soup.findAll('img'):
-            if done == False:
-                try:
-                    if search.lower() in img['title'].lower():
-                        return img['src']
-                        done = True
-                except Exception as e:
-                    pass
-    except Exception as e:
-        pass
+    album = "%s %s Album Art" % (artist, album)
+    url = ("https://www.google.com/search?q=" + quote(album.encode('utf-8')) + "&source=lnms&tbm=isch")
+    header = {
+        'User-Agent':
+        '''
+        Mozilla/5.0 (Windows NT 6.1; WOW64)
+        AppleWebKit/537.36 (KHTML,like Gecko)
+        Chrome/43.0.2357.134 Safari/537.36
+        '''
+    }
+
+    soup = BeautifulSoup(urlopen(Request(url, headers=header)), "html.parser")
+
+    albumart_div = soup.find("div", {"class": "rg_meta"})
+    albumart = json.loads(albumart_div.text)["ou"]
+
+    return albumart
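get_albumart_url() now queries Google Images and reads the image URL out of the first rg_meta JSON blob instead of scraping seekacover.com, and embed_mp3() attaches whatever URL it is handed as an ID3 APIC (cover) frame. A short usage sketch with hypothetical values:

    # Sketch only -- hypothetical file path and titles; functions from irs/metadata.py.
    from irs.metadata import get_albumart_url, embed_mp3

    art_url = get_albumart_url("In Rainbows", "Radiohead")  # scrape a cover URL
    embed_mp3(art_url, "/tmp/Karma Police.mp3")             # write it as an APIC tag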
irs/utils.py

@@ -4,8 +4,21 @@ def strip_special_chars(string):
         string.replace(char, "")
     return string

-def color(text, type):
-    types = {'HEADER': '\033[95m', 'OKBLUE': '\033[94m', 'OKGREEN': '\033[92m',
-             'WARNING': '\033[93m', 'FAIL': '\033[91m', 'ENDC': '\033[0m', 'BOLD': '\033[1m',
-             'UNDERLINE': '\033[4m'}
-    return types[type] + text + types['ENDC']
+class bc:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[32m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+    GRAY = '\033[30m'
+    YELLOW = '\033[33m'
+
+def color(text, colors=[]):
+    color_string = ""
+    for color in colors:
+        color_string += "bc.%s + " % color
+    color_string = color_string[:-2]
+    return (eval(color_string) + text + bc.ENDC)
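The new color() helper builds a source string such as "bc.BOLD + bc.UNDERLINE" and eval()s it to concatenate the escape codes. The same result is possible without eval() via getattr(); a minimal sketch, assuming the bc class above (the helper name below is hypothetical, not part of the commit):

    # Sketch only -- same output as color() above, without eval().
    def color_no_eval(text, colors=[]):
        prefix = "".join(getattr(bc, c) for c in colors)
        return prefix + text + bc.ENDC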