update readme for newer oop and some test changes (#12)

* Update README.md
* docstrings
* user agent; more variants
* description update
* Update __init__.py
* # -*- coding: utf-8 -*-
* Update test_1.py
* update docs for get()
* Update README.md
This commit is contained in:
parent
700b60b5f8
commit
0ad27f5ecc
@@ -13,7 +13,7 @@
[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/akamhy/waybackpy/graphs/commit-activity)
[![codecov](https://codecov.io/gh/akamhy/waybackpy/branch/master/graph/badge.svg)](https://codecov.io/gh/akamhy/waybackpy)
![](https://img.shields.io/github/repo-size/akamhy/waybackpy.svg?label=Repo%20size&style=flat-square)
-[![contributions welcome](https://img.shields.io/static/v1.svg?label=Contributions&message=Welcome&color=0059b3&style=flat-square)]
+![contributions welcome](https://img.shields.io/static/v1.svg?label=Contributions&message=Welcome&color=0059b3&style=flat-square)

![Internet Archive](https://upload.wikimedia.org/wikipedia/commons/thumb/8/84/Internet_Archive_logo_and_wordmark.svg/84px-Internet_Archive_logo_and_wordmark.svg.png)

@@ -54,126 +54,89 @@ pip install waybackpy

## Usage

#### Capturing aka Saving a URL using save()

```diff
+ waybackpy.save(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.

```python
import waybackpy
# Capturing a new archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
-archived_url = waybackpy.save("https://github.com/akamhy/waybackpy", UA="Any-User-Agent")
+target_url = waybackpy.Url("https://github.com/akamhy/waybackpy", user_agent="My-cool-user-agent")
+archived_url = target_url.save()
print(archived_url)
```
-This should print something similar to the following archived URL:
+This should print a URL similar to the following archived URL:

-> <https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy>
+<https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy>

#### Receiving the oldest archive for a URL using oldest()

```diff
+ waybackpy.oldest(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.

```python
import waybackpy
# retrieving the oldest archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
-oldest_archive = waybackpy.oldest("https://www.google.com/", UA="Any-User-Agent")
+target_url = waybackpy.Url("https://www.google.com/", "My-cool-user-agent")
+oldest_archive = target_url.oldest()
print(oldest_archive)
```
-This returns the oldest available archive for <https://google.com>.
+This should print the oldest available archive for <https://google.com>.

-> <http://web.archive.org/web/19981111184551/http://google.com:80/>
+<http://web.archive.org/web/19981111184551/http://google.com:80/>

#### Receiving the newest archive for a URL using newest()

```diff
+ waybackpy.newest(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.

```python
import waybackpy
-# retrieving the newest archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
-newest_archive = waybackpy.newest("https://www.microsoft.com/en-us", UA="Any-User-Agent")
+# retrieving the newest/latest archive on Wayback machine.
+target_url = waybackpy.Url(url="https://www.google.com/", user_agent="My-cool-user-agent")
+newest_archive = target_url.newest()
print(newest_archive)
```
-This returns the newest available archive for <https://www.microsoft.com/en-us>, something just like this:
+This prints the newest available archive for <https://www.microsoft.com/en-us>, something just like this:

-> <http://web.archive.org/web/20200429033402/https://www.microsoft.com/en-us/>
+<http://web.archive.org/web/20200429033402/https://www.microsoft.com/en-us/>

#### Receiving archive close to a specified year, month, day, hour, and minute using near()

```diff
+ waybackpy.near(url, year=2020, month=1, day=1, hour=1, minute=1, UA=user_agent)
```
> url is mandatory. year, month, day, hour and minute are optional arguments. UA is not mandatory, but highly recommended.

```python
import waybackpy
# retrieving the closest archive from a specified year.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
# supported arguments are year, month, day, hour and minute
-archive_near_year = waybackpy.near("https://www.facebook.com/", year=2010, UA="Any-User-Agent")
+target_url = waybackpy.Url("https://www.facebook.com/", "Any-User-Agent")
+archive_near_year = target_url.near(year=2010)
print(archive_near_year)
```
returns: <http://web.archive.org/web/20100504071154/http://www.facebook.com/>

```waybackpy.near("https://www.facebook.com/", year=2010, month=1, UA="Any-User-Agent")``` returns: <http://web.archive.org/web/20101111173430/http://www.facebook.com//>

```waybackpy.near("https://www.oracle.com/index.html", year=2019, month=1, day=5, UA="Any-User-Agent")``` returns: <http://web.archive.org/web/20190105054437/https://www.oracle.com/index.html>

-> Please note that if you only specify the year, the current month and day are default arguments for month and day respectively. Do not expect just putting the year parameter would return the archive closer to January but the current month you are using the package. If you are using it in July 2018 and let's say you use ```waybackpy.near("https://www.facebook.com/", year=2011, UA ="Any-User-Agent")``` then you would be returned the nearest archive to July 2011 and not January 2011. You need to specify the month "1" for January.
+> Please note that if you only specify the year, the current month and day are the default arguments for month and day respectively. Just putting in the year parameter does not return the archive closest to January; it returns the one closest to the current month in which you are using the package. You need to specify the month "1" for January, "2" for February, and so on.

> Do not pad (don't use zeros in the month, year, day, minute, and hour arguments). e.g. For January, set month = 1 and not month = 01.
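
A minimal sketch of the difference (hypothetical example, reusing the Url object style from above):

```python
import waybackpy

target_url = waybackpy.Url("https://www.facebook.com/", "Any-User-Agent")

# month and day default to the current month and day, so this finds the
# archive nearest to <current month> 2011, not January 2011:
archive_default = target_url.near(year=2011)

# month=1 (unpadded, not month=01) explicitly asks for January 2011:
archive_january = target_url.near(year=2011, month=1)

print(archive_default)
print(archive_january)
```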

#### Get the content of a webpage using get()

```diff
+ waybackpy.get(url, encoding="UTF-8", UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended. encoding is detected automatically, don't specify unless necessary.

```python
-from waybackpy import get
+import waybackpy
# retrieving the webpage from any url including the archived urls. No need to import other libraries :)
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
-# supported arguments are url, encoding and UA
-webpage = get("https://example.com/", UA="User-Agent")
+# supported arguments are encoding and user_agent
+target = waybackpy.Url("google.com", "any-user_agent")
+oldest_url = target.oldest()
+webpage = target.get(oldest_url)  # We are getting the source of the oldest archive of google.com.
print(webpage)
```
-> This should print the source code for <https://example.com/>.
+> This should print the source code for the oldest archive of google.com. If no URL is passed to get(), it retrieves the source code of google.com, not any archive.
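
As a small follow-up sketch (same hypothetical Url object as above), calling get() with no argument fetches the live page rather than an archive:

```python
import waybackpy

target = waybackpy.Url("google.com", "any-user_agent")
live_source = target.get()  # no URL passed, so this fetches the live google.com
print(live_source)
```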

#### Count total archives for a URL using total_archives()

```diff
+ waybackpy.total_archives(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.

```python
-from waybackpy import total_archives
-# retrieving the webpage from any url including the archived urls. No need to import other libraries :)
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
-# supported arguments are url and UA
-count = total_archives("https://en.wikipedia.org/wiki/Python (programming language)", UA="User-Agent")
+from waybackpy import Url
+# retrieving the total number of archives for any url, including but not limited to the archived urls.
+count = Url("https://en.wikipedia.org/wiki/Python (programming language)", "User-Agent").total_archives()
print(count)
```
> This should print an integer (int), the total number of archives on archive.org.

## Tests

* [Here](https://github.com/akamhy/waybackpy/tree/master/tests)

## Dependency

-* None, just python standard libraries (json, urllib and datetime). Both python 2 and 3 are supported :)
+* None, just python standard libraries (re, json, urllib and datetime). Both python 2 and 3 are supported :)

## License

[MIT License](https://github.com/akamhy/waybackpy/blob/master/LICENSE)

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import waybackpy

@@ -20,7 +21,7 @@ def test_url_check():
def test_save():
    # Test for urls that exist and can be archived.
    url1 = "https://github.com/akamhy/waybackpy"
-    target = waybackpy.Url(url1, user_agent)
+    target = waybackpy.Url(url1, "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36")
    archived_url1 = target.save()
    assert url1 in archived_url1

@@ -32,18 +33,18 @@ def test_save():
    # Test for urls not allowed to archive by robot.txt.
    with pytest.raises(Exception) as e_info:
        url3 = "http://www.archive.is/faq.html"
-        target = waybackpy.Url(url3, user_agent)
+        target = waybackpy.Url(url3, "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0")
        target.save()

    # Non existent urls, test
    with pytest.raises(Exception) as e_info:
        url4 = "https://githfgdhshajagjstgeths537agajaajgsagudadhuss8762346887adsiugujsdgahub.us"
-        target = waybackpy.Url(url3, user_agent)
+        target = waybackpy.Url(url4, "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27")
        target.save()

def test_near():
    url = "google.com"
-    target = waybackpy.Url(url, user_agent)
+    target = waybackpy.Url(url, "Mozilla/5.0 (Windows; U; Windows NT 6.0; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4")
    archive_near_year = target.near(year=2010)
    assert "2010" in archive_near_year

@@ -53,7 +54,7 @@ def test_near():
    archive_near_day_month_year = target.near(year=2006, month=11, day=15)
    assert ("20061114" in archive_near_day_month_year) or ("20061115" in archive_near_day_month_year) or ("20061116" in archive_near_day_month_year)

-    target = waybackpy.Url("www.python.org", user_agent)
+    target = waybackpy.Url("www.python.org", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246")
    archive_near_hour_day_month_year = target.near(year=2008, month=5, day=9, hour=15)
    assert ("2008050915" in archive_near_hour_day_month_year) or ("2008050914" in archive_near_hour_day_month_year) or ("2008050913" in archive_near_hour_day_month_year)

@@ -86,19 +87,19 @@ def test_total_archives():

if __name__ == "__main__":
    test_clean_url()
-    print(".")
+    print(".") #1
    test_url_check()
-    print(".")
+    print(".") #2
    test_get()
-    print(".")
+    print(".") #3
    test_near()
-    print(".")
+    print(".") #4
    test_newest()
-    print(".")
+    print(".") #5
    test_save()
-    print(".")
+    print(".") #6
    test_oldest()
-    print(".")
+    print(".") #7
    test_total_archives()
-    print(".")
+    print(".") #8
    print("OK")

@@ -10,13 +10,15 @@
# ━━━━━━━━━━━┗━━┛━━━━━━━━━━━━━━━━━━━━━━━━┗━━┛━

"""
-A python wrapper for Internet Archive's Wayback Machine API.
+Waybackpy is a Python library that interfaces with the Internet Archive's Wayback Machine API.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Archive pages and retrieve archived pages easily.

Usage:
>>> import waybackpy
->>> new_archive = waybackpy.save('https://www.python.org')
+>>> target_url = waybackpy.Url('https://www.python.org', 'Your-apps-cool-user-agent')
+>>> new_archive = target_url.save()
>>> print(new_archive)
https://web.archive.org/web/20200502170312/https://www.python.org/

@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-

__title__ = "waybackpy"
-__description__ = "A python wrapper for Internet Archive's Wayback Machine API. Archive pages and retrieve archived pages easily."
+__description__ = "A Python library that interfaces with the Internet Archive's Wayback Machine API. Archive pages and retrieve archived pages easily."
__url__ = "https://akamhy.github.io/waybackpy/"
__version__ = "2.0.0"
__author__ = "akamhy"

@@ -6,11 +6,7 @@ import json
from datetime import datetime
from waybackpy.exceptions import WaybackError

-version = (3, 0)
-python_version = sys.version_info

-if python_version >= version: # If the python ver >= 3
+if sys.version_info >= (3, 0): # If the python ver >= 3
    from urllib.request import Request, urlopen
    from urllib.error import HTTPError, URLError
else: # For python2.x

@@ -19,29 +15,38 @@ else: # For python2.x
default_UA = "waybackpy python package - https://github.com/akamhy/waybackpy"

class Url():
    """waybackpy Url object"""

    def __init__(self, url, user_agent=default_UA):
        self.url = url
        self.user_agent = user_agent
        self.url_check()  # checks url validity on init.

    def __repr__(self):
        """Representation of the object."""
        return "waybackpy.Url(url=%s, user_agent=%s)" % (self.url, self.user_agent)

    def __str__(self):
        """String representation of the object."""
        return "%s" % self.clean_url()

    def __len__(self):
        """Length of the URL."""
        return len(self.clean_url())
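
    # A quick sketch (hypothetical values) of what the dunder methods above give you:
    #   target = Url("https://example.com", "my-user-agent")
    #   str(target)   -> "https://example.com"
    #   len(target)   -> 19
    #   repr(target)  -> "waybackpy.Url(url=https://example.com, user_agent=my-user-agent)"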

    def url_check(self):
        """Check for common URL problems."""
        if "." not in self.url:
            raise URLError("'%s' is not a valid url." % self.url)
        return True

    def clean_url(self):
        """Fix the URL, if possible."""
        return str(self.url).strip().replace(" ", "_")
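
    # For illustration (hypothetical input): clean_url() turns
    # "https://en.wikipedia.org/wiki/Python (programming language)"
    # into "https://en.wikipedia.org/wiki/Python_(programming_language)".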

    def wayback_timestamp(self, **kwargs):
        """Return the formatted timestamp."""
        return (
            str(kwargs["year"])
            +

@@ -55,50 +60,39 @@ class Url():
        )

    def handle_HTTPError(self, e):
-        if e.code >= 500:
-            raise WaybackError(e)
-        if e.code == 429:
-            raise WaybackError(e)
+        """Handle some common HTTPErrors."""
        if e.code == 404:
            raise HTTPError(e)
+        if e.code >= 400:
+            raise WaybackError(e)

    def save(self):
        """Create a new archive for a URL on the Wayback Machine."""
        request_url = ("https://web.archive.org/save/" + self.clean_url())
        hdr = { 'User-Agent' : '%s' % self.user_agent } #nosec
        req = Request(request_url, headers=hdr) #nosec

        try:
            response = urlopen(req) #nosec
-        except HTTPError as e:
-            if self.handle_HTTPError(e) is None:
-                response = urlopen(req, timeout=30) #nosec
+        except Exception:
+            try:
+                response = urlopen(req, timeout=300) #nosec
+            except Exception as e:
+                raise WaybackError(e)
-        except URLError:
-            try:
-                response = urlopen(req) #nosec
-            except URLError as e:
-                raise HTTPError(e)

        header = response.headers

        try:
            arch = re.search(r"rel=\"memento.*?web\.archive\.org(/web/[0-9]{14}/.*?)>", str(header)).group(1)
        except KeyError as e:
            raise WaybackError(e)

        return "https://web.archive.org" + arch

    def get(self, url=None, user_agent=None, encoding=None):
        """Returns the source code of the supplied URL. Auto detects the encoding if not supplied."""
        if not url:
            url = self.clean_url()

        if not user_agent:
            user_agent = self.user_agent

        hdr = { 'User-Agent' : '%s' % user_agent }
        req = Request(url, headers=hdr) #nosec

        try:
            resp = urlopen(req) #nosec
        except URLError:

@@ -106,55 +100,54 @@ class Url():
            resp = urlopen(req) #nosec
        except URLError as e:
            raise HTTPError(e)

        if not encoding:
            try:
                encoding = resp.headers['content-type'].split('charset=')[-1]
            except AttributeError:
                encoding = "UTF-8"

        return resp.read().decode(encoding.replace("text/html", "UTF-8", 1))
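
    # For illustration (hypothetical header): a content-type of
    # "text/html; charset=ISO-8859-1" leaves encoding "ISO-8859-1" after the
    # split above, while a bare "text/html" (no "charset=" part) passes through
    # unchanged and is swapped to "UTF-8" by the replace() before decoding.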

    def near(self, **kwargs):
        """Return the archive from the Wayback Machine for a URL closest to the time supplied.

        Supported params are year, month, day, hour and minute.
        Parameters not supplied default to the current (runtime) time.
        """
        year = kwargs.get("year", datetime.utcnow().strftime('%Y'))
        month = kwargs.get("month", datetime.utcnow().strftime('%m'))
        day = kwargs.get("day", datetime.utcnow().strftime('%d'))
        hour = kwargs.get("hour", datetime.utcnow().strftime('%H'))
        minute = kwargs.get("minute", datetime.utcnow().strftime('%M'))

        timestamp = self.wayback_timestamp(year=year, month=month, day=day, hour=hour, minute=minute)
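        # e.g. near(year=2010, month=1, day=1, hour=1, minute=1) would build the
        # timestamp "201001010101" (assuming the two-digit padding implied by the
        # truncated wayback_timestamp() above); the availability API accepts such
        # shorter-than-14-digit timestamps.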

        request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (self.clean_url(), str(timestamp))
        hdr = { 'User-Agent' : '%s' % self.user_agent }
        req = Request(request_url, headers=hdr) # nosec

        try:
            response = urlopen(req) #nosec
-        except HTTPError as e:
+        except Exception as e:
            self.handle_HTTPError(e)

        data = json.loads(response.read().decode("UTF-8"))
        if not data["archived_snapshots"]:
            raise WaybackError("'%s' is not yet archived." % self.clean_url())

        archive_url = (data["archived_snapshots"]["closest"]["url"])
        # wayback machine returns http sometimes, idk why? But they support https
        archive_url = archive_url.replace("http://web.archive.org/web/", "https://web.archive.org/web/", 1)
        return archive_url

    def oldest(self, year=1994):
        """Return the oldest archive from the Wayback Machine for the URL."""
        return self.near(year=year)

    def newest(self):
        """Return the newest archive on the Wayback Machine for the URL; you may not always get the very newest archive because the Wayback Machine's database can lag."""
        return self.near()
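
    # A quick sketch (hypothetical URL) of how both helpers defer to near():
    #   Url("google.com", "ua").oldest()  -> self.near(year=1994)
    #   Url("google.com", "ua").newest()  -> self.near(), with every field
    #                                        defaulting to the current time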

    def total_archives(self):
        """Returns the total number of archives on Wayback Machine for an URL."""
        hdr = { 'User-Agent' : '%s' % self.user_agent }
        request_url = "https://web.archive.org/cdx/search/cdx?url=%s&output=json&fl=statuscode" % self.clean_url()
        req = Request(request_url, headers=hdr) # nosec

        try:
            response = urlopen(req) #nosec
-        except HTTPError as e:
+        except Exception as e:
            self.handle_HTTPError(e)

        return str(response.read()).count(",") # Most efficient method to count number of archives (yet)
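        # Why counting commas works (illustrative, hypothetical response): with
        # output=json and a single field (fl=statuscode), the CDX API returns rows
        # like [["statuscode"], ["200"], ["301"], ["200"]], so each snapshot row
        # after the header contributes exactly one "," to the string.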