Update index.rst and remove dupes

Author: Akash
Date:   2020-07-20 10:07:31 +05:30 (committed by GitHub)
parent 1a78d88be2
commit bb94e0d1c5
3 changed files with 45 additions and 36 deletions

index.rst

@@ -79,7 +79,7 @@ Capturing aka Saving an url using save()
 https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy
 Try this out in your browser @
-https://repl.it/repls/CompassionateRemoteOrigin#main.py\
+https://repl.it/@akamhy/WaybackPySaveExample\
 Receiving the oldest archive for an URL using oldest()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -102,7 +102,7 @@ Receiving the oldest archive for an URL using oldest()
 http://web.archive.org/web/19981111184551/http://google.com:80/
 Try this out in your browser @
-https://repl.it/repls/MixedSuperDimensions#main.py\
+https://repl.it/@akamhy/WaybackPyOldestExample\
 Receiving the newest archive for an URL using newest()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -125,7 +125,7 @@ Receiving the newest archive for an URL using newest()
 https://web.archive.org/web/20200714013225/https://www.facebook.com/
 Try this out in your browser @
-https://repl.it/repls/OblongMiniInteger#main.py\
+https://repl.it/@akamhy/WaybackPyNewestExample\
 Receiving archive close to a specified year, month, day, hour, and minute using near()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -186,7 +186,7 @@ The library doesn't supports seconds yet. You are encourged to create a
 PR ;)
 Try this out in your browser @
-https://repl.it/repls/SparseDeadlySearchservice#main.py\
+https://repl.it/@akamhy/WaybackPyNearExample\
 Get the content of webpage using get()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -222,7 +222,7 @@ Get the content of webpage using get()
     print(google_oldest_archive_source)
 Try this out in your browser @
-https://repl.it/repls/PinkHoneydewNonagon#main.py\
+https://repl.it/@akamhy/WaybackPyGetExample#main.py\
 Count total archives for an URL using total\_archives()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -247,7 +247,7 @@ Count total archives for an URL using total\_archives()
 2440
 Try this out in your browser @
-https://repl.it/repls/DigitalUnconsciousNumbers#main.py\
+https://repl.it/@akamhy/WaybackPyTotalArchivesExample\
 Tests
 -----
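
For quick reference, the sort of snippet those updated repl.it examples run, sketched from the API named in the headings above (the target URL and user agent here are placeholders, not taken from the examples themselves):

    import waybackpy

    user_agent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0"
    target = waybackpy.Url("https://github.com/akamhy/waybackpy", user_agent)

    print(target.save())                            # capture now, returns the fresh archive URL
    print(target.oldest())                          # oldest snapshot
    print(target.newest())                          # newest snapshot
    print(target.near(year=2020, month=7, day=20))  # snapshot closest to the given time
    print(target.get(target.oldest()))              # source of the oldest snapshot
    print(target.total_archives())                  # snapshot count, e.g. the 2440 above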

tests (waybackpy test suite)

@@ -5,6 +5,11 @@ import waybackpy
 import pytest
 import random
 import time
+if sys.version_info >= (3, 0):  # If the python ver >= 3
+    from urllib.request import Request, urlopen
+    from urllib.error import URLError
+else:  # For python2.x
+    from urllib2 import Request, urlopen, URLError
 user_agent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0"
@@ -103,6 +108,16 @@ def test_get():
     target = waybackpy.Url("google.com", user_agent)
     assert "Welcome to Google" in target.get(target.oldest())
+def test_wayback_timestamp():
+    ts = waybackpy.Url("https://www.google.com","UA").wayback_timestamp(year=2020,month=1,day=2,hour=3,minute=4)
+    assert "202001020304" in str(ts)
+def test_get_response():
+    hdr = { 'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'}
+    req = Request("https://www.google.com", headers=hdr)  # nosec
+    response = waybackpy.Url("https://www.google.com","UA").get_response(req)
+    assert str(type(response)) == "<class 'http.client.HTTPResponse'>"
 def test_total_archives():
     time.sleep(10)
     if sys.version_info > (3, 6):
@@ -131,4 +146,6 @@ if __name__ == "__main__":
     print(".") #7
     test_total_archives()
     print(".") #8
+    test_wayback_timestamp()
+    print(".") #9
     print("OK")

waybackpy module defining class Url

@@ -65,7 +65,7 @@ class Url():
         hdr = { 'User-Agent' : '%s' % self.user_agent } #nosec
         req = Request(request_url, headers=hdr) #nosec
         try:
-            response = urlopen(req, timeout=30) #nosec
+            response = urlopen(req) #nosec
         except Exception:
             try:
                 response = urlopen(req) #nosec
@@ -74,6 +74,12 @@ class Url():
         header = response.headers
         def archive_url_parser(header):
+            """Parse out the archive from header."""
+            #Regex1
+            arch = re.search(r"rel=\"memento.*?(web\.archive\.org/web/[0-9]{14}/.*?)>", str(header))
+            if arch:
+                return arch.group(1)
+            #Regex2
             arch = re.search(r"X-Cache-Key:\shttps(.*)[A-Z]{2}", str(header))
             if arch:
                 return arch.group(1)
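
To see what the new Regex1 branch is after, here is a sketch run against a made-up header string shaped like the Wayback Machine's Link header (the URLs and dates are invented for the demo; real headers carry more fields):

    import re

    header = ('Link: <https://example.com/>; rel="original", '
              '<https://web.archive.org/web/20200720100731/https://example.com/>; '
              'rel="memento"; datetime="Mon, 20 Jul 2020 10:07:31 GMT", '
              '<https://web.archive.org/web/20200720100731/https://example.com/>; '
              'rel="last memento"; datetime="Mon, 20 Jul 2020 10:07:31 GMT"')
    arch = re.search(r"rel=\"memento.*?(web\.archive\.org/web/[0-9]{14}/.*?)>", str(header))
    print(arch.group(1) if arch else "no match")
    # -> web.archive.org/web/20200720100731/https://example.com/

If Regex1 finds nothing, the function falls through to the existing X-Cache-Key regex as before.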
@@ -93,22 +99,24 @@ class Url():
         hdr = { 'User-Agent' : '%s' % user_agent }
         req = Request(url, headers=hdr) #nosec
-        try:
-            resp=urlopen(req) #nosec
-        except Exception:
-            try:
-                resp=urlopen(req) #nosec
-            except Exception as e:
-                raise WaybackError(e)
+        response = self.get_response(req)
         if not encoding:
             try:
-                encoding= resp.headers['content-type'].split('charset=')[-1]
+                encoding= response.headers['content-type'].split('charset=')[-1]
             except AttributeError:
                 encoding = "UTF-8"
-        return resp.read().decode(encoding.replace("text/html", "UTF-8", 1))
+        return response.read().decode(encoding.replace("text/html", "UTF-8", 1))
+    def get_response(self, req):
+        """Get response for the supplied request."""
+        try:
+            response = urlopen(req) #nosec
+        except Exception:
+            try:
+                response = urlopen(req) #nosec
+            except Exception as e:
+                raise WaybackError(e)
+        return response
     def near(self, **kwargs):
         """ Returns the archived from Wayback Machine for an URL closest to the time supplied.
@@ -124,15 +132,7 @@ class Url():
         request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (self.clean_url(), str(timestamp))
         hdr = { 'User-Agent' : '%s' % self.user_agent }
         req = Request(request_url, headers=hdr) # nosec
-        try:
-            response = urlopen(req) #nosec
-        except Exception:
-            try:
-                response = urlopen(req) #nosec
-            except Exception as e:
-                raise WaybackError(e)
+        response = self.get_response(req)
         data = json.loads(response.read().decode("UTF-8"))
         if not data["archived_snapshots"]:
             raise WaybackError("'%s' is not yet archived." % url)
@@ -154,13 +154,5 @@ class Url():
         hdr = { 'User-Agent' : '%s' % self.user_agent }
         request_url = "https://web.archive.org/cdx/search/cdx?url=%s&output=json&fl=statuscode" % self.clean_url()
         req = Request(request_url, headers=hdr) # nosec
-        try:
-            response = urlopen(req) #nosec
-        except Exception:
-            try:
-                response = urlopen(req) #nosec
-            except Exception as e:
-                raise WaybackError(e)
+        response = self.get_response(req)
         return str(response.read()).count(",") # Most efficient method to count number of archives (yet)
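
A quick worked example of why count(",") serves as the archive count: with output=json&fl=statuscode the CDX endpoint returns a JSON header row plus one single-field row per snapshot, so the commas separating rows number exactly one per snapshot. The payload below is made up for illustration:

    # Hypothetical CDX body for a URL with three snapshots.
    sample = '[["statuscode"],\n["200"],\n["301"],\n["200"]]'
    print(str(sample).count(","))  # 3, i.e. three archived snapshots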