Remove sensitive data from logging in messages

Sergey M․ 2017-11-11 20:49:03 +07:00
parent 79d1f8ed68
commit e4d9586562
14 changed files with 14 additions and 14 deletions
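
Every hunk below makes the same one-line change: the progress note passed to the extractor's download helper (_download_webpage, _download_json, _call_api, or Twitch's login_step) previously interpolated the account name via 'Logging in as %s' % username, and that note is echoed to the console and to --verbose logs. Each extractor now passes the static note 'Logging in' instead. A minimal sketch of the pattern, using a hypothetical ExampleIE (the class name, URL and form fields are illustrative only, not code from this commit):

from .common import InfoExtractor
from ..utils import urlencode_postdata


class ExampleIE(InfoExtractor):
    # Hypothetical extractor, shown only to illustrate the pattern above
    _LOGIN_URL = 'https://example.com/login'

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return
        login_form = {
            'username': username,
            'password': password,
        }
        self._download_webpage(
            self._LOGIN_URL, None,
            # was: 'Logging in as %s' % username -- leaked the account name
            'Logging in',
            data=urlencode_postdata(login_form))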

youtube_dl/extractor/animeondemand.py

@@ -78,7 +78,7 @@ class AnimeOnDemandIE(InfoExtractor):
             post_url = urljoin(self._LOGIN_URL, post_url)

         response = self._download_webpage(
-            post_url, None, 'Logging in as %s' % username,
+            post_url, None, 'Logging in',
             data=urlencode_postdata(login_form), headers={
                 'Referer': self._LOGIN_URL,
             })

youtube_dl/extractor/atresplayer.py

@@ -87,7 +87,7 @@ class AtresPlayerIE(InfoExtractor):
             self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(
-            request, None, 'Logging in as %s' % username)
+            request, None, 'Logging in')

         error = self._html_search_regex(
             r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',

youtube_dl/extractor/bambuser.py

@@ -59,7 +59,7 @@ class BambuserIE(InfoExtractor):
             self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Referer', self._LOGIN_URL)
         response = self._download_webpage(
-            request, None, 'Logging in as %s' % username)
+            request, None, 'Logging in')

         login_error = self._html_search_regex(
             r'(?s)<div class="messages error">(.+?)</div>',

youtube_dl/extractor/dramafever.py

@@ -54,7 +54,7 @@ class DramaFeverBaseIE(AMPIE):
         request = sanitized_Request(
             self._LOGIN_URL, urlencode_postdata(login_form))
         response = self._download_webpage(
-            request, None, 'Logging in as %s' % username)
+            request, None, 'Logging in')

         if all(logout_pattern not in response
                for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):

youtube_dl/extractor/funimation.py

@@ -57,7 +57,7 @@ class FunimationIE(InfoExtractor):
         try:
             data = self._download_json(
                 'https://prod-api-funimationnow.dadcdigital.com/api/auth/login/',
-                None, 'Logging in as %s' % username, data=urlencode_postdata({
+                None, 'Logging in', data=urlencode_postdata({
                     'username': username,
                     'password': password,
                 }))

youtube_dl/extractor/noco.py

@@ -70,7 +70,7 @@ class NocoIE(InfoExtractor):
             return

         login = self._download_json(
-            self._LOGIN_URL, None, 'Logging in as %s' % username,
+            self._LOGIN_URL, None, 'Logging in',
             data=urlencode_postdata({
                 'a': 'login',
                 'cookie': '1',

youtube_dl/extractor/patreon.py

@@ -67,7 +67,7 @@ class PatreonIE(InfoExtractor):
             'https://www.patreon.com/processLogin',
             compat_urllib_parse_urlencode(login_form).encode('utf-8')
         )
-        login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
+        login_page = self._download_webpage(request, None, note='Logging in')

         if re.search(r'onLoginFailed', login_page):
             raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)

youtube_dl/extractor/pluralsight.py

@@ -116,7 +116,7 @@ class PluralsightIE(PluralsightBaseIE):
             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

         response = self._download_webpage(
-            post_url, None, 'Logging in as %s' % username,
+            post_url, None, 'Logging in',
             data=urlencode_postdata(login_form),
             headers={'Content-Type': 'application/x-www-form-urlencoded'})

youtube_dl/extractor/roosterteeth.py

@@ -68,7 +68,7 @@ class RoosterTeethIE(InfoExtractor):
         login_request = self._download_webpage(
             self._LOGIN_URL, None,
-            note='Logging in as %s' % username,
+            note='Logging in',
             data=urlencode_postdata(login_form),
             headers={
                 'Referer': self._LOGIN_URL,

youtube_dl/extractor/safari.py

@@ -61,7 +61,7 @@ class SafariBaseIE(InfoExtractor):
         request = sanitized_Request(
             self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
         login_page = self._download_webpage(
-            request, None, 'Logging in as %s' % username)
+            request, None, 'Logging in')

         if not is_logged(login_page):
             raise ExtractorError(

youtube_dl/extractor/twitch.py

@@ -101,7 +101,7 @@ class TwitchBaseIE(InfoExtractor):
             fail(clean_html(login_page))

         redirect_page, handle = login_step(
-            login_page, handle, 'Logging in as %s' % username, {
+            login_page, handle, 'Logging in', {
                 'username': username,
                 'password': password,
             })

youtube_dl/extractor/udemy.py

@@ -164,7 +164,7 @@ class UdemyIE(InfoExtractor):
         })

         response = self._download_webpage(
-            self._LOGIN_URL, None, 'Logging in as %s' % username,
+            self._LOGIN_URL, None, 'Logging in',
             data=urlencode_postdata(login_form),
             headers={
                 'Referer': self._ORIGIN_URL,

youtube_dl/extractor/viki.py

@@ -99,7 +99,7 @@ class VikiBaseIE(InfoExtractor):
         login = self._call_api(
             'sessions.json', None,
-            'Logging in as %s' % username, post_data=login_form)
+            'Logging in', post_data=login_form)

         self._token = login.get('token')
         if not self._token:

youtube_dl/extractor/vk.py

@@ -67,7 +67,7 @@ class VKBaseIE(InfoExtractor):
         login_page = self._download_webpage(
             'https://login.vk.com/?act=login', None,
-            note='Logging in as %s' % username,
+            note='Logging in',
             data=urlencode_postdata(login_form))

         if re.search(r'onLoginFailed', login_page):