From bb94e0d1c5c2abbdb377d9de97ce322a0feaa109 Mon Sep 17 00:00:00 2001 From: Akash <64683866+akamhy@users.noreply.github.com> Date: Mon, 20 Jul 2020 10:07:31 +0530 Subject: [PATCH] Update index.rst and remove dupes --- index.rst | 12 +++++----- tests/test_1.py | 17 +++++++++++++++ waybackpy/wrapper.py | 52 +++++++++++++++++++------------------------- 3 files changed, 45 insertions(+), 36 deletions(-) diff --git a/index.rst b/index.rst index dc7c590..f9e14c7 100644 --- a/index.rst +++ b/index.rst @@ -79,7 +79,7 @@ Capturing aka Saving an url using save() https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy Try this out in your browser @ -https://repl.it/repls/CompassionateRemoteOrigin#main.py\ +https://repl.it/@akamhy/WaybackPySaveExample\ Receiving the oldest archive for an URL using oldest() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -102,7 +102,7 @@ Receiving the oldest archive for an URL using oldest() http://web.archive.org/web/19981111184551/http://google.com:80/ Try this out in your browser @ -https://repl.it/repls/MixedSuperDimensions#main.py\ +https://repl.it/@akamhy/WaybackPyOldestExample\ Receiving the newest archive for an URL using newest() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -125,7 +125,7 @@ Receiving the newest archive for an URL using newest() https://web.archive.org/web/20200714013225/https://www.facebook.com/ Try this out in your browser @ -https://repl.it/repls/OblongMiniInteger#main.py\ +https://repl.it/@akamhy/WaybackPyNewestExample\ Receiving archive close to a specified year, month, day, hour, and minute using near() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -186,7 +186,7 @@ The library doesn't supports seconds yet. 
You are encourged to create a PR ;) Try this out in your browser @ -https://repl.it/repls/SparseDeadlySearchservice#main.py\ +https://repl.it/@akamhy/WaybackPyNearExample\ Get the content of webpage using get() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -222,7 +222,7 @@ Get the content of webpage using get() print(google_oldest_archive_source) Try this out in your browser @ -https://repl.it/repls/PinkHoneydewNonagon#main.py\ +https://repl.it/@akamhy/WaybackPyGetExample#main.py\ Count total archives for an URL using total\_archives() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -247,7 +247,7 @@ Count total archives for an URL using total\_archives() 2440 Try this out in your browser @ -https://repl.it/repls/DigitalUnconsciousNumbers#main.py\ +https://repl.it/@akamhy/WaybackPyTotalArchivesExample\ Tests ----- diff --git a/tests/test_1.py b/tests/test_1.py index 4995dcd..c63af3c 100644 --- a/tests/test_1.py +++ b/tests/test_1.py @@ -5,6 +5,11 @@ import waybackpy import pytest import random import time +if sys.version_info >= (3, 0): # If the python ver >= 3 + from urllib.request import Request, urlopen + from urllib.error import URLError +else: # For python2.x + from urllib2 import Request, urlopen, URLError user_agent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0" @@ -103,6 +108,16 @@ def test_get(): target = waybackpy.Url("google.com", user_agent) assert "Welcome to Google" in target.get(target.oldest()) +def test_wayback_timestamp(): + ts = waybackpy.Url("https://www.google.com","UA").wayback_timestamp(year=2020,month=1,day=2,hour=3,minute=4) + assert "202001020304" in str(ts) + +def test_get_response(): + hdr = { 'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'} + req = Request("https://www.google.com", headers=hdr) # nosec + response = waybackpy.Url("https://www.google.com","UA").get_response(req) + assert str(type(response)) == "<class 'http.client.HTTPResponse'>" + def test_total_archives(): time.sleep(10) if 
sys.version_info > (3, 6): @@ -131,4 +146,6 @@ if __name__ == "__main__": print(".") #7 test_total_archives() print(".") #8 + test_wayback_timestamp() + print(".") #9 print("OK") diff --git a/waybackpy/wrapper.py b/waybackpy/wrapper.py index 27c60f0..3573e7c 100644 --- a/waybackpy/wrapper.py +++ b/waybackpy/wrapper.py @@ -65,7 +65,7 @@ class Url(): hdr = { 'User-Agent' : '%s' % self.user_agent } #nosec req = Request(request_url, headers=hdr) #nosec try: - response = urlopen(req, timeout=30) #nosec + response = urlopen(req) #nosec except Exception: try: response = urlopen(req) #nosec @@ -74,6 +74,12 @@ class Url(): header = response.headers def archive_url_parser(header): + """Parse out the archive from header.""" + #Regex1 + arch = re.search(r"rel=\"memento.*?(web\.archive\.org/web/[0-9]{14}/.*?)>", str(header)) + if arch: + return arch.group(1) + #Regex2 arch = re.search(r"X-Cache-Key:\shttps(.*)[A-Z]{2}", str(header)) if arch: return arch.group(1) @@ -93,22 +99,24 @@ class Url(): hdr = { 'User-Agent' : '%s' % user_agent } req = Request(url, headers=hdr) #nosec - - try: - resp=urlopen(req) #nosec - except Exception: - try: - resp=urlopen(req) #nosec - except Exception as e: - raise WaybackError(e) - + response = self.get_response(req) if not encoding: try: - encoding= resp.headers['content-type'].split('charset=')[-1] + encoding= response.headers['content-type'].split('charset=')[-1] except AttributeError: encoding = "UTF-8" + return response.read().decode(encoding.replace("text/html", "UTF-8", 1)) - return resp.read().decode(encoding.replace("text/html", "UTF-8", 1)) + def get_response(self, req): + """Get response for the supplied request.""" + try: + response = urlopen(req) #nosec + except Exception: + try: + response = urlopen(req) #nosec + except Exception as e: + raise WaybackError(e) + return response def near(self, **kwargs): """ Returns the archived from Wayback Machine for an URL closest to the time supplied. 
@@ -124,15 +132,7 @@ class Url(): request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (self.clean_url(), str(timestamp)) hdr = { 'User-Agent' : '%s' % self.user_agent } req = Request(request_url, headers=hdr) # nosec - - try: - response = urlopen(req) #nosec - except Exception: - try: - response = urlopen(req) #nosec - except Exception as e: - raise WaybackError(e) - + response = self.get_response(req) data = json.loads(response.read().decode("UTF-8")) if not data["archived_snapshots"]: raise WaybackError("'%s' is not yet archived." % url) @@ -154,13 +154,5 @@ class Url(): hdr = { 'User-Agent' : '%s' % self.user_agent } request_url = "https://web.archive.org/cdx/search/cdx?url=%s&output=json&fl=statuscode" % self.clean_url() req = Request(request_url, headers=hdr) # nosec - - try: - response = urlopen(req) #nosec - except Exception: - try: - response = urlopen(req) #nosec - except Exception as e: - raise WaybackError(e) - + response = self.get_response(req) return str(response.read()).count(",") # Most efficient method to count number of archives (yet)