Compare commits

...

7 Commits
2.1.1 ... 2.1.2

Author SHA1 Message Date
94cb08bb37 Update setup.py 2020-07-20 10:41:00 +05:30
af888db13e 2.1.2 2020-07-20 10:40:37 +05:30
d24f2408ee Update test_1.py 2020-07-20 10:31:47 +05:30
ddd2274015 Update test_1.py 2020-07-20 10:21:15 +05:30
99abdb7c67 Update test_1.py 2020-07-20 10:16:39 +05:30
f3bb9a8540 Update wrapper.py 2020-07-20 10:11:36 +05:30
bb94e0d1c5 Update index.rst and remove dupes 2020-07-20 10:07:31 +05:30
5 changed files with 45 additions and 64 deletions

View File

@ -79,7 +79,7 @@ Capturing aka Saving an url using save()
https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy
Try this out in your browser @
https://repl.it/repls/CompassionateRemoteOrigin#main.py\
https://repl.it/@akamhy/WaybackPySaveExample\
Receiving the oldest archive for an URL using oldest()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -102,7 +102,7 @@ Receiving the oldest archive for an URL using oldest()
http://web.archive.org/web/19981111184551/http://google.com:80/
Try this out in your browser @
https://repl.it/repls/MixedSuperDimensions#main.py\
https://repl.it/@akamhy/WaybackPyOldestExample\
Receiving the newest archive for an URL using newest()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -125,7 +125,7 @@ Receiving the newest archive for an URL using newest()
https://web.archive.org/web/20200714013225/https://www.facebook.com/
Try this out in your browser @
https://repl.it/repls/OblongMiniInteger#main.py\
https://repl.it/@akamhy/WaybackPyNewestExample\
Receiving archive close to a specified year, month, day, hour, and minute using near()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -186,7 +186,7 @@ The library doesn't support seconds yet. You are encouraged to create a
PR ;)
Try this out in your browser @
https://repl.it/repls/SparseDeadlySearchservice#main.py\
https://repl.it/@akamhy/WaybackPyNearExample\
Get the content of webpage using get()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -222,7 +222,7 @@ Get the content of webpage using get()
print(google_oldest_archive_source)
Try this out in your browser @
https://repl.it/repls/PinkHoneydewNonagon#main.py\
https://repl.it/@akamhy/WaybackPyGetExample#main.py\
Count total archives for an URL using total\_archives()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -247,7 +247,7 @@ Count total archives for an URL using total\_archives()
2440
Try this out in your browser @
https://repl.it/repls/DigitalUnconsciousNumbers#main.py\
https://repl.it/@akamhy/WaybackPyTotalArchivesExample\
Tests
-----

View File

@ -19,7 +19,7 @@ setup(
author = about['__author__'],
author_email = about['__author_email__'],
url = about['__url__'],
download_url = 'https://github.com/akamhy/waybackpy/archive/2.1.1.tar.gz',
download_url = 'https://github.com/akamhy/waybackpy/archive/2.1.2.tar.gz',
keywords = ['wayback', 'archive', 'archive website', 'wayback machine', 'Internet Archive'],
install_requires=[],
python_requires= ">=2.7",

View File

@ -5,6 +5,11 @@ import waybackpy
import pytest
import random
import time
if sys.version_info >= (3, 0): # If the python ver >= 3
from urllib.request import Request, urlopen
from urllib.error import URLError
else: # For python2.x
from urllib2 import Request, urlopen, URLError
user_agent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0"
@ -103,6 +108,16 @@ def test_get():
target = waybackpy.Url("google.com", user_agent)
assert "Welcome to Google" in target.get(target.oldest())
def test_wayback_timestamp():
    """wayback_timestamp() should encode year/month/day/hour/minute as YYYYMMDDhhmm."""
    url_obj = waybackpy.Url("https://www.google.com", "UA")
    stamp = url_obj.wayback_timestamp(year=2020, month=1, day=2, hour=3, minute=4)
    assert "202001020304" in str(stamp)
def test_get_response():
    """get_response() on a reachable URL should yield an HTTP 200 response."""
    headers = { 'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'}
    request = Request("https://www.google.com", headers=headers) # nosec
    result = waybackpy.Url("https://www.google.com","UA").get_response(request)
    assert result.code == 200
def test_total_archives():
time.sleep(10)
if sys.version_info > (3, 6):
@ -113,22 +128,3 @@ def test_total_archives():
time.sleep(5)
target = waybackpy.Url(" https://gaha.e4i3n.m5iai3kip6ied.cima/gahh2718gs/ahkst63t7gad8 ", user_agent)
assert target.total_archives() == 0
if __name__ == "__main__":
test_clean_url()
print(".") #1
test_url_check()
print(".") #1
test_get()
print(".") #3
test_near()
print(".") #4
test_newest()
print(".") #5
test_save()
print(".") #6
test_oldest()
print(".") #7
test_total_archives()
print(".") #8
print("OK")

View File

@ -3,7 +3,7 @@
__title__ = "waybackpy"
__description__ = "A Python library that interfaces with the Internet Archive's Wayback Machine API. Archive pages and retrieve archived pages easily."
__url__ = "https://akamhy.github.io/waybackpy/"
__version__ = "2.1.1"
__version__ = "2.1.2"
__author__ = "akamhy"
__author_email__ = "akash3pro@gmail.com"
__license__ = "MIT"

View File

@ -64,16 +64,15 @@ class Url():
request_url = ("https://web.archive.org/save/" + self.clean_url())
hdr = { 'User-Agent' : '%s' % self.user_agent } #nosec
req = Request(request_url, headers=hdr) #nosec
try:
response = urlopen(req, timeout=30) #nosec
except Exception:
try:
response = urlopen(req) #nosec
except Exception as e:
raise WaybackError(e)
header = response.headers
header = self.get_response(req).headers
def archive_url_parser(header):
"""Parse out the archive from header."""
#Regex1
arch = re.search(r"rel=\"memento.*?(web\.archive\.org/web/[0-9]{14}/.*?)>", str(header))
if arch:
return arch.group(1)
#Regex2
arch = re.search(r"X-Cache-Key:\shttps(.*)[A-Z]{2}", str(header))
if arch:
return arch.group(1)
@ -93,22 +92,24 @@ class Url():
hdr = { 'User-Agent' : '%s' % user_agent }
req = Request(url, headers=hdr) #nosec
try:
resp=urlopen(req) #nosec
except Exception:
try:
resp=urlopen(req) #nosec
except Exception as e:
raise WaybackError(e)
response = self.get_response(req)
if not encoding:
try:
encoding= resp.headers['content-type'].split('charset=')[-1]
encoding= response.headers['content-type'].split('charset=')[-1]
except AttributeError:
encoding = "UTF-8"
return response.read().decode(encoding.replace("text/html", "UTF-8", 1))
return resp.read().decode(encoding.replace("text/html", "UTF-8", 1))
def get_response(self, req):
    """Open *req* with urlopen(), retrying once before giving up.

    The first failure is swallowed and the request is attempted again;
    a second failure is wrapped in WaybackError and raised.
    """
    for attempt in range(2):
        try:
            return urlopen(req) #nosec
        except Exception as e:
            # Second consecutive failure: surface it to the caller.
            if attempt == 1:
                raise WaybackError(e)
def near(self, **kwargs):
""" Returns the archived from Wayback Machine for an URL closest to the time supplied.
@ -124,15 +125,7 @@ class Url():
request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (self.clean_url(), str(timestamp))
hdr = { 'User-Agent' : '%s' % self.user_agent }
req = Request(request_url, headers=hdr) # nosec
try:
response = urlopen(req) #nosec
except Exception:
try:
response = urlopen(req) #nosec
except Exception as e:
raise WaybackError(e)
response = self.get_response(req)
data = json.loads(response.read().decode("UTF-8"))
if not data["archived_snapshots"]:
raise WaybackError("'%s' is not yet archived." % url)
@ -154,13 +147,5 @@ class Url():
hdr = { 'User-Agent' : '%s' % self.user_agent }
request_url = "https://web.archive.org/cdx/search/cdx?url=%s&output=json&fl=statuscode" % self.clean_url()
req = Request(request_url, headers=hdr) # nosec
try:
response = urlopen(req) #nosec
except Exception:
try:
response = urlopen(req) #nosec
except Exception as e:
raise WaybackError(e)
response = self.get_response(req)
return str(response.read()).count(",") # Most efficient method to count number of archives (yet)