mirror of https://github.com/cooperhammond/irs.git
synced 2025-01-24 06:50:58 +00:00

More error catching and a higher success rate in album finding

parent 6709388022
commit 0ffe3803c6

 irs.py | 204
@@ -27,20 +27,23 @@ from bs4 import BeautifulSoup
 import youtube_dl, mutagen
 
 def download_album_art(album, band):
-    search = "%s %s" % (album, band)
-    url = "http://www.seekacover.com/cd/" + urllib.parse.quote_plus(search)
-    page = requests.get(url).text
-    soup = BeautifulSoup(page)
-    done = False
-    for img in soup.findAll('img'):
-        if done == False:
-            try:
-                if search.lower() in img['title'].lower():
-                    url = img['src']
-                    urllib.request.urlretrieve(url, "cover.jpg")
-                    done = True
-            except Exception:
-                pass
+    try:
+        search = "%s %s" % (album, band)
+        url = "http://www.seekacover.com/cd/" + urllib.parse.quote_plus(search)
+        page = requests.get(url).text
+        soup = BeautifulSoup(page, 'html.parser')
+        done = False
+        for img in soup.findAll('img'):
+            if done == False:
+                try:
+                    if search.lower() in img['title'].lower():
+                        url = img['src']
+                        urllib.request.urlretrieve(url, "%s/cover.jpg" % album)
+                        done = True
+                except Exception:
+                    pass
+    except Exception:
+        print ("%s There was an error parsing the album art of '%s'" % (output("e"), album) )
 
 def embed_mp3(art_location, song_path):
     music = mutagen.id3.ID3(song_path)
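Note on the new cover path: urlretrieve(url, "%s/cover.jpg" % album) assumes a directory named after the album already exists; if it does not, the write fails and the new outer except reports it as a parse error. A minimal sketch of that assumption (the save_cover helper and the makedirs call are illustrative, not part of this commit):

    import os
    import urllib.request

    def save_cover(url, album):
        # Illustrative only: create the album directory first so the
        # "<album>/cover.jpg" write cannot raise FileNotFoundError.
        os.makedirs(album, exist_ok=True)
        urllib.request.urlretrieve(url, "%s/cover.jpg" % album)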
@@ -58,26 +61,29 @@ def embed_mp3(art_location, song_path):
     music.save()
 
 def find_mp3(song, author):
-    print ("'%s' by '%s'\n" % (song, author))
-    query_string = urllib.parse.urlencode({"search_query" : ("%s %s lyrics" % (song, author))})
-    html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
-    search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
-    in_song = False
-    i = -1
-    given_up_score = 0
-    while in_song == False:
-        if given_up_score >= 10:
-            in_song = True
-        i += 1
-        audio_url = ("http://www.youtube.com/watch?v=" + search_results[i])
-        title = (BeautifulSoup(urlopen(audio_url), 'html.parser')).title.string.lower()
-        song_title = (song.lower()).split("/")
-        for song in song_title:
-            if song in title:
-                in_song = True
-        if in_song == False:
-            given_up_score += 1
-    return audio_url
+    try:
+        print ("'%s' by '%s'\n" % (song, author))
+        query_string = urllib.parse.urlencode({"search_query" : ("%s %s lyrics" % (song, author))})
+        html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
+        search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
+        in_song = False
+        i = -1
+        given_up_score = 0
+        while in_song == False:
+            if given_up_score >= 10:
+                in_song = True
+            i += 1
+            audio_url = ("http://www.youtube.com/watch?v=" + search_results[i])
+            title = (BeautifulSoup(urlopen(audio_url), 'html.parser')).title.string.lower()
+            song_title = (song.lower()).split("/")
+            for song in song_title:
+                if song in title:
+                    in_song = True
+            if in_song == False:
+                given_up_score += 1
+        return audio_url
+    except Exception as e:
+        print ("%s There was an error finding the url of '%s'" % (output("e"), song) )
 
 def rip_mp3(song, author, album, tracknum):
     audio_url = find_mp3(song, author)
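For context, the loop this hunk wraps walks YouTube search results and gives up after ten titles that contain no part of the song name. A distilled, self-contained version of that give-up pattern (function and variable names are illustrative, not from the repo):

    def first_matching_title(titles, song):
        # Accept the first title containing any "/"-separated part of the
        # song name; give up after 10 misses. find_mp3 settles for the
        # current candidate at that point; this sketch returns None, which
        # the new except branch in find_mp3 would report as an error.
        given_up_score = 0
        for title in titles:
            if any(part in title.lower() for part in song.lower().split("/")):
                return title
            given_up_score += 1
            if given_up_score >= 10:
                return None
        return None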
@@ -166,9 +172,9 @@ def visible(element):
         return False
     return True
 
-def search_google(song_name, band):
+def search_google(song_name, band, search_terms):
     try:
-        string = "%s %s" % (song_name, band)
+        string = "%s %s %s" % (song_name, band, search_terms)
         filename = 'http://www.google.com/search?q=' + urllib.parse.quote_plus(string)
         hdr = {
             'User-Agent':'Mozilla/5.0',
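The new search_terms parameter lets callers bias the Google query; the later hunks show get_album passing "album" on the first attempt and "" on retry. A usage example with hypothetical values:

    # The query string becomes "dark side of the moon pink floyd album".
    visible_texts = search_google("dark side of the moon", "pink floyd", "album")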
@@ -180,71 +186,81 @@ def search_google(song_name, band):
         return visible_texts
     except Exception as e:
         print ("%s There was an error with Auto-parsing." % output("e"))
-        return ""
+        return
 
-def get_album(album_name, artist, what_to_do):
-    visible_texts = search_google(album_name, artist)
-    songs = []
-    num = True
-    for i, j in enumerate(visible_texts):
-        if 'Songs' in j:
-            if visible_texts[i + 1] == "1":
-                indexed = i
-    while num == True:
-        try:
-            if type(int(visible_texts[indexed])) is int:
-                a = visible_texts[indexed + 1]
-                songs.append(a)
-            indexed += 1
-        except:
-            indexed += 1
-            if indexed >= 1000:
-                num = False
-            else:
-                pass
-    if what_to_do == "download":
-        for i, j in enumerate(songs):
-            rip_mp3(j, artist, album_name, i + 1)
-    elif what_to_do == "stream":
-        for i in songs:
-            a = find_mp3(i, artist)
-            command = 'mpv "%s" --no-video' % a
-            os.system(command)
+def get_album(album_name, artist, what_to_do, search):
+    visible_texts = search_google(album_name, artist, search)
+    try:
+        songs = []
+        num = True
+        for i, j in enumerate(visible_texts):
+            if 'Songs' in j:
+                if visible_texts[i + 1] == "1":
+                    indexed = i
+        while num == True:
+            try:
+                if type(int(visible_texts[indexed])) is int:
+                    a = visible_texts[indexed + 1]
+                    songs.append(a)
+                indexed += 1
+            except:
+                indexed += 1
+                if indexed >= 1000:
+                    num = False
+                else:
+                    pass
+        if what_to_do == "download":
+            for i, j in enumerate(songs):
+                rip_mp3(j, artist, album_name, i + 1, True)
+        elif what_to_do == "stream":
+            for i in songs:
+                a = find_mp3(i, artist)
+                command = 'mpv "%s" --no-video' % a
+                os.system(command)
+    except Exception as e:
+        if str(e) == "local variable 'indexed' referenced before assignment":
+            get_album(album_name, artist, what_to_do, "")
+        else:
+            print ("%s There was an error with getting the contents \
+of the album '%s':\n%s" % (output("e"), album_name, e) )
 
 
 def get_torrent_url(args, category):
-    search_url = 'https://kat.cr/usearch/' + urllib.parse.quote_plus((" ".join(args) + " category:" + category))
-    search_request_response = requests.get(search_url, verify=True)
-    soup = BeautifulSoup(search_request_response.text, 'html.parser')
-    results, ran_out = 0, False
-    i = 0
-    print ("")
-    while True:
-        movie_page = soup.find_all("a", class_="cellMainLink")
-        for number in range(0,10):
-            try:
-                print ("%s. " % (number + 1) + movie_page[number].string)
-                results += 1
-            except Exception:
-                ran_out = True
-                pass
-        if ran_out == True:
-            if results == 0:
-                print (output('e') + " No results.\n")
-                exit(0)
-            else:
-                print (output('e') + " End of results.")
-        if results != 0:
-
-            try: a = int(str(input("\n%s What torrent would you like? " % output("q")))) - 1
-            except Exception: a = 100; pass
-            # This code is either hyper-efficient, or completely against every ettiquite.
-
-            if a in tuple(range(0, 10)):
-                search_url = requests.get('https://kat.cr' + movie_page[a].get('href'), verify=True)
-                soup = BeautifulSoup(search_url.text, 'html.parser')
-                torrent_url = 'https:' + soup.find_all('a', class_='siteButton')[0].get('href')
-                return torrent_url
+    try:
+        search_url = 'https://kat.cr/usearch/' + urllib.parse.quote_plus((" ".join(args) + " category:" + category))
+        search_request_response = requests.get(search_url, verify=True)
+        soup = BeautifulSoup(search_request_response.text, 'html.parser')
+        results, ran_out = 0, False
+        i = 0
+        print ("")
+        while True:
+            movie_page = soup.find_all("a", class_="cellMainLink")
+            for number in range(0,10):
+                try:
+                    print ("%s. " % (number + 1) + movie_page[number].string)
+                    results += 1
+                except Exception:
+                    ran_out = True
+                    pass
+            if ran_out == True:
+                if results == 0:
+                    print (output('e') + " No results.\n")
+                    exit(0)
+                else:
+                    print (output('e') + " End of results.")
+            if results != 0:
+
+                try: a = int(str(input("\n%s What torrent would you like? " % output("q")))) - 1
+                except Exception: a = 100; pass
+                # This code is either hyper-efficient, or completely against every ettiquite.
+
+                if a in tuple(range(0, 10)):
+                    search_url = requests.get('https://kat.cr' + movie_page[a].get('href'), verify=True)
+                    soup = BeautifulSoup(search_url.text, 'html.parser')
+                    torrent_url = 'https:' + soup.find_all('a', class_='siteButton')[0].get('href')
+                    return torrent_url
+    except Exception as e:
+        print ("%s There was an error getting the torrent url with '%s'" % (output("e"), args))
 
 def rip_playlist(file_name, what_to_do):
     txt_file = open(file_name, 'r')
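The notable branch above is get_album's new except clause: when the scraped page never contains a 'Songs' marker, indexed is never bound, the later visible_texts[indexed] lookup raises, and the commit detects this by string-matching the exception message and retrying once with an empty search term. That retry is what the commit message means by a higher success rate in album finding. A sketch of the same idea, assuming one matches the exception type rather than CPython's message text (names are illustrative, not from the repo):

    def find_track_index(visible_texts):
        # 'indexed' is bound only if a "Songs" header followed by "1"
        # appears in the page text, mirroring the loop in get_album.
        for i, text in enumerate(visible_texts[:-1]):
            if 'Songs' in text and visible_texts[i + 1] == "1":
                indexed = i
        return indexed  # raises UnboundLocalError when nothing matched

    def find_track_index_with_retry(narrow_texts, broad_texts):
        # Catching UnboundLocalError directly is sturdier than comparing
        # str(e) against "local variable 'indexed' referenced before
        # assignment", which is interpreter-specific wording.
        try:
            return find_track_index(narrow_texts)
        except UnboundLocalError:
            return find_track_index(broad_texts)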
@@ -290,7 +306,7 @@ def main():
 
     elif media == "album":
         album_name = (" ".join(args)).split(" by ")
-        get_album(album_name[0], album_name[1], what_to_do)
+        get_album(album_name[0], album_name[1], what_to_do, "album")
 
     elif media == "playlist":
         rip_playlist(args[-1], what_to_do)
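At the call site, the new fourth argument seeds the first search attempt with "album"; if parsing fails, get_album retries itself with an empty term. With hypothetical values, the dispatch above reduces to:

    # "the wall by pink floyd" splits on " by " into name and artist:
    get_album("the wall", "pink floyd", "download", "album")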