# waybackpy/pywayback/wrapper.py
# -*- coding: utf-8 -*-
from datetime import datetime
from urllib.request import Request, urlopen
import urllib.error
class TooManyArchivingRequestsError(Exception):
    """Raised when a single URL is requested for archiving too many times
    in a short timespan.

    The Wayback Machine does not support archiving any URL too many
    times within a short period of time.
    """

class ArchivingNotAllowed(Exception):
    """Raised when a site forbids robot archiving.

    Files like robots.txt can be set to deny robot archiving; the
    Wayback Machine respects these files and will not archive such URLs.
    """

def save(url, UA="pywayback python module"):
    """Archive *url* via the Wayback Machine's "Save Page Now" endpoint.

    Parameters:
        url: the URL to archive.
        UA: value for the User-Agent header sent with the request.

    Returns:
        The full https://web.archive.org URL of the freshly created snapshot.

    Raises:
        TooManyArchivingRequestsError: if the save request fails with an
            HTTP error (typically rate limiting by the Wayback Machine).
            NOTE(review): every HTTPError is mapped to this exception,
            not only rate-limit responses — confirm intended.
        ArchivingNotAllowed: if the site owner disallows robot archiving.
    """
    request_url = "https://web.archive.org/save/" + url
    req = Request(request_url, headers={'User-Agent': UA})
    try:
        response = urlopen(req)  # nosec
    except urllib.error.HTTPError as e:
        # Chain the original HTTPError so the status code stays debuggable.
        raise TooManyArchivingRequestsError(e) from e
    header = response.headers
    # The Wayback Machine signals a robots.txt exclusion via this header key.
    if "exclusion.robots.policy" in str(header):
        raise ArchivingNotAllowed("Can not archive %s. Disabled by site owner." % (url))
    archive_id = header['Content-Location']
    return "https://web.archive.org" + archive_id
def near(
    url,
    year=None,
    month=None,
    day=None,
    hour=None,
    minute=None,
):
    """Return the archived snapshot URL closest to the given moment.

    Any component left as None defaults to the current UTC time at call
    time. (The original code evaluated ``datetime.utcnow()`` in the
    default arguments, freezing "now" at import time for the life of the
    process — a classic mutable/early-bound default bug.)

    Parameters:
        url: the URL to look up.
        year/month/day/hour/minute: components of the target timestamp;
            ints or strings are accepted (each is str()-converted).

    Returns:
        The URL of the closest archived snapshot.

    Raises:
        KeyError: if the availability API reports no snapshot for *url*.
    """
    import json

    now = datetime.utcnow()
    year = now.strftime('%Y') if year is None else year
    month = now.strftime('%m') if month is None else month
    day = now.strftime('%d') if day is None else day
    hour = now.strftime('%H') if hour is None else hour
    minute = now.strftime('%M') if minute is None else minute
    timestamp = str(year) + str(month) + str(day) + str(hour) + str(minute)
    request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (str(url), str(timestamp))
    response = urlopen(request_url)  # nosec
    encoding = response.info().get_content_charset('utf8')
    data = json.loads(response.read().decode(encoding))
    return data["archived_snapshots"]["closest"]["url"]
def oldest(url):
    """Return the earliest known archive of *url* (searches from 1995)."""
    return near(url, year=1995)
def newest(url):
    """Return the most recent archive of *url* (snapshot nearest to now)."""
    return near(url)