Compare commits
378 Commits
SHA1

cd5c3c61a5
87fb5ecd58
5954fcc646
89016d433c
edaa1d5d54
16f94db144
25eb709ade
6d233f24fc
ec341fa8b3
cf18090f90
81162eebd0
ca4f79a2e3
27f2727049
118dc6c523
1216ffbc70
d58a5f0ee5
7e7412d9d1
70c38c5a60
f8bf9c16f9
2bbfee7b2f
7317bd7183
e0dfbe0b7d
0b631592ea
d3a8f343f8
97f8b96411
004ff26196
a772c22431
b79f1c471e
f49d67a411
ad8bd25633
d2a3946425
7b6401d59b
ed6160c54f
fcab19a40a
5f3cd28046
9d9cc3328b
b69e4dff37
d8cabdfdb5
320ef30371
e61447effd
947647f2e7
bc1dc4dc96
5cbdfc040b
3be6ac01fc
b8b9bc098f
946c28eddf
004027f73b
e86dd93b29
988568e8f0
f4c32a44fd
7755e6391c
9dbe3b3bf4
e84ba9f2c3
1250d105b4
f03b2cb6cb
5e0ea023e6
8dff6f349e
e04cfdfeaf
0e2cc8f5ba
9007149fef
8b7603e241
5ea1d3ba4f
4408c5e2ca
9afe29a819
d79b10c74c
32314dc102
50e176e2ba
4007859c92
d8bd6c628d
28f6ff8df2
7ac9353f74
15c7244a22
8510210e94
552967487e
86a90a3840
759874cdc6
06095202fe
06fc7855bf
c49fe971fd
d6783d5525
9262f5da21
d1a1cf2546
cd8a32ed1f
57512c65ff
d9ea26e11c
2bea92b348
d506685f68
7844d15d99
c0252edff2
e7488f3a3e
aed75ad1db
d740959c34
2d83043ef7
31b1056217
97712b2c1e
a8acc4c4d8
1bacd73002
79901ba968
df64e839d7
405e9a2a79
db551abbf6
d13dd4db1a
d3bb8337a1
fd5e85420c
5c685ef5d7
6a3d96b453
afe1b15a5f
4fd9d142e7
5e9fdb40ce
fa72098270
d18f955044
9c340d6967
78d0e0c126
564101e6f5
de5a3e1561
52e46fecc2
3b6415abc7
66e16d6d89
16b9bdd7f9
7adc01bff2
9bbd056268
2ab44391cf
cc3628ae18
1d751b942b
261a867a21
2e487e88d3
c8d0ad493a
ce869177fd
58616fb986
4e68cd5743
a7b805292d
6dc6124dc4
5a7fc7d568
5a9c861cad
dd1917c77e
db8f902cff
88cda94c0b
09290f88d1
e5835091c9
7312ed1f4f
6ae8f843d3
36b936820b
a3bc6aad2b
edc2f63d93
ffe0810b12
40233eb115
d549d31421
0725163af8
712471176b
dcd7b03302
76205d9cf6
ec0a0d04cc
7bb01df846
6142e0b353
a65990aee3
259a024eb1
91402792e6
eabf4dc046
5a7bd73565
4693dbf9c1
f4f2e51315
d6b7df6837
dafba5d0cb
6c71dfbe41
a6470b1036
04cda4558e
625ed63482
a03813315f
a2550f17d7
15ef5816db
93b52bd0fe
28ff877081
3e3ecff9df
ce64135ba8
2af6580ffb
8a3c515176
d98c4f32ad
e0a4b007d5
6fb6b2deee
1882862992
0c6107e675
bd079978bf
5dec4927cd
62e5217b9e
9823c809e9
db5737a857
ca0821a466
bb4dbc7d3c
7c7fd75376
0b71433667
1b499a7594
da390ee8a3
d3e68d0e70
fde28d57aa
6092e504c8
93ef60ecd2
461b3f74c9
3c53b411b0
8125526061
2dc81569a8
fd163f3d36
a0a918cf0d
4943cf6873
bc3efc7d63
f89368f16d
c919a6a605
0280fca189
60ee8b95a8
ca51c14332
525cf17c6f
406e03c52f
672b33e83a
b19b840628
a6df4f899c
7686e9c20d
3c5932bc39
f9a986f489
0d7458ee90
ac8b9d6a50
58cd9c28e7
5088305a58
9f847a5e55
6c04c2f3d3
925be7b17e
2b132456ac
50e3154a4e
7aef50428f
d8ec0f5025
0a2f97c034
3e9cf23578
7f927ec7be
9de6393cd5
91e7f65617
d465454019
1a81eb97fb
6b3b2e2a7d
82c65454e6
19710461b6
a3661d6b85
58375e4ef4
ea023e98da
f1065ed1c8
315519b21f
07c98661de
2cd991a54e
ede251afb3
a8ce970ca0
243af26bf6
0f1db94884
c304f58ea2
23f7222cb5
ce7294d990
c9fa114d2e
8b6bacb28e
32d8ad7780
cbf2f90faa
4dde3e3134
1551e8f1c6
c84f09e2d2
57a32669b5
fe017cbcc8
5edb03d24b
c5de2232ba
ca9186c301
8a4b631c13
ec9ce92f48
e95d35c37f
36d662b961
2835f8877e
18cbd2fd30
a2812fb56f
77effcf649
7272ef45a0
56116551ac
4dcda94cb0
09f59b0182
ed24184b99
56bef064b1
44bb2cf5e4
e231228721
b8b2d6dfa9
3eca6294df
eb037a0284
a01821f20b
b21036f8df
b43bacb7ac
f7313b255a
7457e1c793
f7493d823f
7fa7b59ce3
78a608db50
93f7dfdaf9
83c6f256c9
dee9105794
3bfc3b46d0
553f150bee
b3a7e714a5
cd9841713c
1ea9548d46
be7642c837
a418a4e464
aec035ef1e
6d37993ab9
72b80ca44e
c10aa9279c
68d809a7d6
4ad09a419b
ddc6620f09
4066a65678
8e46a9ba7a
a5a98b9b00
a721ab7d6c
7db27ae5e1
8fd4462025
c458a15820
bae3412bee
94cb08bb37
af888db13e
d24f2408ee
ddd2274015
99abdb7c67
f3bb9a8540
bb94e0d1c5
1a78d88be2
3ec61758b3
83c962166d
e87dee3bdf
b27bfff15a
970fc1cd08
65391bf14b
8ab116f276
6f82041ec9
11059c960e
eee1b8eba1
f7de8f5575
3fa0c32064
aa1e3b8825
58d2d585c8
e8efed2e2f
49089b7321
55d8687566
0fa28527af
68259fd2d9
e7086a89d3
e39467227c
ba840404cf
8fbd2d9e55
eebf6043de
3d3b09d6d8
ef15b5863c
256c0cdb6b
12c72a8294
0ad27f5ecc
700b60b5f8
11032596c8
9727f92168
d2893fec13
f1353b2129
c76a95ef90
62d88359ce
9942c474c9
dfb736e794
84d1766917
9d3cdfafb3
20a16bfa45
f2112c73f6
9860527d96
9ac1e877c8
f881705d00
f015c3f4f3
42ac399362
e9d010c793
58a6409528
7ca2029158
80331833f2
5e3d3a815f
6182a18cf4
9bca750310
34 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,34 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: akamhy

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:

1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Version:**

- OS: [e.g. iOS]
- Version [e.g. 22]
- Is latest version? [e.g. Yes/No]

**Additional context**
Add any other context about the problem here.
19 .github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,19 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: akamhy
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
30 .github/workflows/build-test.yml vendored Normal file
@@ -0,0 +1,30 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Build

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.7', '3.10']
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -U setuptools wheel
    - name: Build test the package
      run: |
        python setup.py sdist bdist_wheel
70 .github/workflows/codeql-analysis.yml vendored Normal file
@@ -0,0 +1,70 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '30 6 * * 1'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    # and modify them (or add more) to build your code if your project
    # uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
31 .github/workflows/python-publish.yml vendored Normal file
@@ -0,0 +1,31 @@
# This workflows will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Upload Python Package

on:
  release:
    types: [created]

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.x'
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install setuptools wheel twine
    - name: Build and publish
      env:
        TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
        TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
      run: |
        python setup.py sdist bdist_wheel
        twine upload dist/*
43 .github/workflows/unit-test.yml vendored Normal file
@@ -0,0 +1,43 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10']
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install '.[dev]'
    - name: Lint with flake8
      run: |
        flake8 . --count --show-source --statistics
    - name: Lint with black
      run: |
        black . --check --diff
    - name: Static type test with mypy
      run: |
        mypy -p waybackpy -p tests
    - name: Test with pytest
      run: |
        pytest
    - name: Upload coverage to Codecov
      run: |
        bash <(curl -s https://codecov.io/bash) -t ${{ secrets.CODECOV_TOKEN }}
3 .gitignore vendored
@@ -1,3 +1,6 @@
# Files generated while testing
*-urls-*.txt

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
7 .pep8speaks.yml Normal file
@@ -0,0 +1,7 @@
scanner:
  diff_only: True
  linter: flake8

flake8:
  max-line-length: 88
  extend-ignore: W503,W605
14 .travis.yml
@@ -1,14 +0,0 @@
language: python
python:
  - "2.7"
  - "3.6"
  - "3.8"
os: linux
dist: xenial
cache: pip
install:
  - pip install pytest
before_script:
  cd tests
script:
  - pytest test_1.py
@@ -1,8 +1,12 @@
{
  "scanSettings": {
    "baseBranches": []
  },
  "checkRunSettings": {
    "vulnerableCheckRunConclusionLevel": "failure"
    "vulnerableCheckRunConclusionLevel": "failure",
    "displayMode": "diff"
  },
  "issueSettings": {
    "minSeverityLevel": "LOW"
  }
}
}
128 CODE_OF_CONDUCT.md Normal file
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
akamhy@yahoo.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
<https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
<https://www.contributor-covenant.org/faq>. Translations are available at
<https://www.contributor-covenant.org/translations>.
16 CONTRIBUTORS.md Normal file
@@ -0,0 +1,16 @@
# CONTRIBUTORS

## AUTHORS

- akamhy (<https://github.com/akamhy>)
- eggplants (<https://github.com/eggplants>)
- danvalen1 (<https://github.com/danvalen1>)
- AntiCompositeNumber (<https://github.com/AntiCompositeNumber>)
- rafaelrdealmeida (<https://github.com/rafaelrdealmeida>)
- jonasjancarik (<https://github.com/jonasjancarik>)
- jfinkhaeuser (<https://github.com/jfinkhaeuser>)

## ACKNOWLEDGEMENTS

- mhmdiaa (<https://github.com/mhmdiaa>) for <https://gist.github.com/mhmdiaa/adf6bff70142e5091792841d4b372050>. known_urls is based on this gist.
- dequeued0 (<https://github.com/dequeued0>) for reporting bugs and useful feature requests.
2 LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2020 akamhy
Copyright (c) 2020-2022 waybackpy contributors ( https://github.com/akamhy/waybackpy/graphs/contributors )

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
240 README.md
@@ -1,176 +1,142 @@
# waybackpy
[![Build Status](https://travis-ci.org/akamhy/waybackpy.svg?branch=master)](https://travis-ci.org/akamhy/waybackpy)
[![Downloads](https://img.shields.io/pypi/dm/waybackpy.svg)](https://pypistats.org/packages/waybackpy)
[![Release](https://img.shields.io/github/v/release/akamhy/waybackpy.svg)](https://github.com/akamhy/waybackpy/releases)
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/255459cede9341e39436ec8866d3fb65)](https://www.codacy.com/manual/akamhy/waybackpy?utm_source=github.com&utm_medium=referral&utm_content=akamhy/waybackpy&utm_campaign=Badge_Grade)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/akamhy/waybackpy/blob/master/LICENSE)
[![Maintainability](https://api.codeclimate.com/v1/badges/942f13d8177a56c1c906/maintainability)](https://codeclimate.com/github/akamhy/waybackpy/maintainability)
[![CodeFactor](https://www.codefactor.io/repository/github/akamhy/waybackpy/badge)](https://www.codefactor.io/repository/github/akamhy/waybackpy)
[![made-with-python](https://img.shields.io/badge/Made%20with-Python-1f425f.svg)](https://www.python.org/)
![pypi](https://img.shields.io/pypi/v/wayback.svg)
![PyPI - Python Version](https://img.shields.io/pypi/pyversions/waybackpy?style=flat-square)
[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/akamhy/waybackpy/graphs/commit-activity)
<!-- markdownlint-disable MD033 MD041 -->
<div align="center">

<img src="https://raw.githubusercontent.com/akamhy/waybackpy/master/assets/waybackpy_logo.svg"><br>

<h3>A Python package & CLI tool that interfaces with the Wayback Machine API</h3>

</div>

The waybackpy is a python wrapper for [Internet Archive](https://en.wikipedia.org/wiki/Internet_Archive)'s [Wayback Machine](https://en.wikipedia.org/wiki/Wayback_Machine).
<p align="center">
<a href="https://github.com/akamhy/waybackpy/actions?query=workflow%3ATests"><img alt="Unit Tests" src="https://github.com/akamhy/waybackpy/workflows/Tests/badge.svg"></a>
<a href="https://codecov.io/gh/akamhy/waybackpy"><img alt="codecov" src="https://codecov.io/gh/akamhy/waybackpy/branch/master/graph/badge.svg"></a>
<a href="https://pypi.org/project/waybackpy/"><img alt="pypi" src="https://img.shields.io/pypi/v/waybackpy.svg"></a>
<a href="https://pepy.tech/project/waybackpy?versions=2*&versions=1*&versions=3*"><img alt="Downloads" src="https://pepy.tech/badge/waybackpy/month"></a>
<a href="https://app.codacy.com/gh/akamhy/waybackpy?utm_source=github.com&utm_medium=referral&utm_content=akamhy/waybackpy&utm_campaign=Badge_Grade_Settings"><img alt="Codacy Badge" src="https://api.codacy.com/project/badge/Grade/6d777d8509f642ac89a20715bb3a6193"></a>
<a href="https://github.com/akamhy/waybackpy/commits/master"><img alt="GitHub lastest commit" src="https://img.shields.io/github/last-commit/akamhy/waybackpy?color=blue&style=flat-square"></a>
<a href="#"><img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/waybackpy?style=flat-square"></a>
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
</p>

Table of contents
=================
<!--ts-->
---

* [Installation](https://github.com/akamhy/waybackpy#installation)
# <img src="https://github.githubassets.com/images/icons/emoji/unicode/2b50.png" width="30"></img> Introduction

* [Usage](https://github.com/akamhy/waybackpy#usage)
* [Saving an url using save()](https://github.com/akamhy/waybackpy#capturing-aka-saving-an-url-using-save)
* [Receiving the oldest archive for an URL Using oldest()](https://github.com/akamhy/waybackpy#receiving-the-oldest-archive-for-an-url-using-oldest)
* [Receiving the recent most/newest archive for an URL using newest()](https://github.com/akamhy/waybackpy#receiving-the-newest-archive-for-an-url-using-newest)
* [Receiving archive close to a specified year, month, day, hour, and minute using near()](https://github.com/akamhy/waybackpy#receiving-archive-close-to-a-specified-year-month-day-hour-and-minute-using-near)
* [Get the content of webpage using get()](https://github.com/akamhy/waybackpy#get-the-content-of-webpage-using-get)
* [Count total archives for an URL using total_archives()](https://github.com/akamhy/waybackpy#count-total-archives-for-an-url-using-total_archives)
Waybackpy is a [Python package](https://www.udacity.com/blog/2021/01/what-is-a-python-package.html) and a [CLI](https://www.w3schools.com/whatis/whatis_cli.asp) tool that interfaces with the [Wayback Machine](https://en.wikipedia.org/wiki/Wayback_Machine) API.

Wayback Machine has 3 client side [API](https://www.redhat.com/en/topics/api/what-are-application-programming-interfaces)s.

* [Tests](https://github.com/akamhy/waybackpy#tests)
- [Save API](https://github.com/akamhy/waybackpy/wiki/Wayback-Machine-APIs#save-api)
- [Availability API](https://github.com/akamhy/waybackpy/wiki/Wayback-Machine-APIs#availability-api)
- [CDX API](https://github.com/akamhy/waybackpy/wiki/Wayback-Machine-APIs#cdx-api)

* [Dependency](https://github.com/akamhy/waybackpy#dependency)
These three APIs can be accessed via the waybackpy either by importing it in a script or from the CLI.
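A minimal side-by-side sketch of the three corresponding classes; the class names are the ones used in the usage examples below, and the URL and user agent here are placeholders:

```python
from waybackpy import (
    WaybackMachineAvailabilityAPI,
    WaybackMachineCDXServerAPI,
    WaybackMachineSaveAPI,
)

# Placeholder values; any URL and a descriptive user agent will do.
url = "https://example.com"
user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"

save_api = WaybackMachineSaveAPI(url, user_agent)                  # Save API
availability_api = WaybackMachineAvailabilityAPI(url, user_agent)  # Availability API
cdx_api = WaybackMachineCDXServerAPI(url, user_agent)              # CDX API
```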
* [License](https://github.com/akamhy/waybackpy#license)
## <img src="https://github.githubassets.com/images/icons/emoji/unicode/1f3d7.png" width="20"></img> Installation

<!--te-->
**Using [pip](https://en.wikipedia.org/wiki/Pip_(package_manager)), from [PyPI](https://pypi.org/) (recommended)**:

## Installation
Using [pip](https://en.wikipedia.org/wiki/Pip_(package_manager)):

**pip install waybackpy**


## Usage

#### Capturing aka Saving an url Using save()

```diff
+ waybackpy.save(url, UA=user_agent)
```bash
pip install waybackpy
```
> url is mandatory. UA is not, but highly recommended.
```python
import waybackpy
# Capturing a new archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
archived_url = waybackpy.save("https://github.com/akamhy/waybackpy", UA = "Any-User-Agent")
print(archived_url)

**Using [conda](https://en.wikipedia.org/wiki/Conda_(package_manager)), from [conda-forge](https://anaconda.org/conda-forge/waybackpy) (recommended)**:

See also [waybackpy feedstock](https://github.com/conda-forge/waybackpy-feedstock), maintainers are [@rafaelrdealmeida](https://github.com/rafaelrdealmeida/),
[@labriunesp](https://github.com/labriunesp/)
and [@akamhy](https://github.com/akamhy/).

```bash
conda install -c conda-forge waybackpy
```
This should print something similar to the following archived URL:

<https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy>
**Install directly from [this git repository](https://github.com/akamhy/waybackpy) (NOT recommended)**:

#### Receiving the oldest archive for an URL Using oldest()

```diff
+ waybackpy.oldest(url, UA=user_agent)
```bash
pip install git+https://github.com/akamhy/waybackpy.git
```
> url is mandatory. UA is not, but highly recommended.

## <img src="https://github.githubassets.com/images/icons/emoji/unicode/1f433.png" width="20"></img> Docker Image

Docker Hub : <https://hub.docker.com/r/secsi/waybackpy>

[Docker image](https://searchitoperations.techtarget.com/definition/Docker-image) is automatically updated on every release by [Regulary and Automatically Updated Docker Images](https://github.com/cybersecsi/RAUDI) (RAUDI).

RAUDI is a tool by SecSI (<https://secsi.io>), an Italian cybersecurity startup.

## <img src="https://github.githubassets.com/images/icons/emoji/unicode/1f680.png" width="20"></img> Usage

### As a Python package

#### Save API aka SavePageNow

```python
import waybackpy
# retrieving the oldest archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
oldest_archive = waybackpy.oldest("https://www.google.com/", UA = "Any-User-Agent")
print(oldest_archive)
>>> from waybackpy import WaybackMachineSaveAPI
>>> url = "https://github.com"
>>> user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"
>>>
>>> save_api = WaybackMachineSaveAPI(url, user_agent)
>>> save_api.save()
https://web.archive.org/web/20220118125249/https://github.com/
>>> save_api.cached_save
False
>>> save_api.timestamp()
datetime.datetime(2022, 1, 18, 12, 52, 49)
```
This returns the oldest available archive for <https://google.com>.

<http://web.archive.org/web/19981111184551/http://google.com:80/>

#### Receiving the newest archive for an URL using newest()

```diff
+ waybackpy.newest(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.

#### Availability API

```python
import waybackpy
# retrieving the newest archive on Wayback machine.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
newest_archive = waybackpy.newest("https://www.microsoft.com/en-us", UA = "Any-User-Agent")
print(newest_archive)
>>> from waybackpy import WaybackMachineAvailabilityAPI
>>>
>>> url = "https://google.com"
>>> user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"
>>>
>>> availability_api = WaybackMachineAvailabilityAPI(url, user_agent)
>>>
>>> availability_api.oldest()
https://web.archive.org/web/19981111184551/http://google.com:80/
>>>
>>> availability_api.newest()
https://web.archive.org/web/20220118150444/https://www.google.com/
>>>
>>> availability_api.near(year=2010, month=10, day=10, hour=10)
https://web.archive.org/web/20101010101708/http://www.google.com/
```
This returns the newest available archive for <https://www.microsoft.com/en-us>, something just like this:

<http://web.archive.org/web/20200429033402/https://www.microsoft.com/en-us/>

#### Receiving archive close to a specified year, month, day, hour, and minute using near()

```diff
+ waybackpy.near(url, year=2020, month=1, day=1, hour=1, minute=1, UA=user_agent)
```
> url is mandotory. year,month,day,hour and minute are optional arguments. UA is not mandotory, but higly recomended.

#### CDX API aka CDXServerAPI

```python
import waybackpy
# retriving the the closest archive from a specified year.
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
# supported argumnets are year,month,day,hour and minute
archive_near_year = waybackpy.near("https://www.facebook.com/", year=2010, UA ="Any-User-Agent")
print(archive_near_year)
>>> from waybackpy import WaybackMachineCDXServerAPI
>>> url = "https://pypi.org"
>>> user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"
>>> cdx = WaybackMachineCDXServerAPI(url, user_agent, start_timestamp=2016, end_timestamp=2017)
>>> for item in cdx.snapshots():
...     print(item.archive_url)
...
https://web.archive.org/web/20160110011047/http://pypi.org/
https://web.archive.org/web/20160305104847/http://pypi.org/
.
. # URLS REDACTED FOR READABILITY
.
https://web.archive.org/web/20171127171549/https://pypi.org/
https://web.archive.org/web/20171206002737/http://pypi.org:80/
```
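As a small follow-on sketch, the same generator can be collected into a list; only `snapshots()` and `item.archive_url` from the example above are assumed, and the timestamps are arbitrary:

```python
from waybackpy import WaybackMachineCDXServerAPI

user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"
cdx = WaybackMachineCDXServerAPI(
    "https://pypi.org", user_agent, start_timestamp=2016, end_timestamp=2017
)

# snapshots() yields one snapshot object per capture; keep just the archive URLs.
archive_urls = [item.archive_url for item in cdx.snapshots()]
print(len(archive_urls), "captures between 2016 and 2017")
```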
returns : <http://web.archive.org/web/20100504071154/http://www.facebook.com/>

```waybackpy.near("https://www.facebook.com/", year=2010, month=1, UA ="Any-User-Agent")``` returns: <http://web.archive.org/web/20101111173430/http://www.facebook.com//>
> Documentation is at <https://github.com/akamhy/waybackpy/wiki/Python-package-docs>.

```waybackpy.near("https://www.oracle.com/index.html", year=2019, month=1, day=5, UA ="Any-User-Agent")``` returns: <http://web.archive.org/web/20190105054437/https://www.oracle.com/index.html>
> Please note that if you only specify the year, the current month and day are default arguments for month and day respectively. Do not expect just putting the year parameter would return the archive closer to January but the current month you are using the package. If you are using it in July 2018 and let's say you use ```waybackpy.near("https://www.facebook.com/", year=2011, UA ="Any-User-Agent")``` then you would be returned the nearest archive to July 2011 and not January 2011. You need to specify the month "1" for January.
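Assuming the new Availability API's near() (shown above) follows the same month/day defaulting, a minimal sketch of passing the month explicitly:

```python
from waybackpy import WaybackMachineAvailabilityAPI

user_agent = "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0"
api = WaybackMachineAvailabilityAPI("https://www.facebook.com/", user_agent)

# month=1 (unpadded) pins the query to January instead of the current month;
# month=01 would not even parse as a Python integer literal.
print(api.near(year=2011, month=1))
```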
### As a CLI tool

> Do not pad (don't use zeros in the month, year, day, minute, and hour arguments). e.g. For January, set month = 1 and not month = 01.
Demo video on [asciinema.org](https://asciinema.org), you can copy the text from video:

#### Get the content of webpage using get()
[![asciicast](https://asciinema.org/a/464367.svg)](https://asciinema.org/a/464367)

```diff
+ waybackpy.get(url, encoding="UTF-8", UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended. encoding is detected automatically, don't specify unless necessary.
> CLI documentation is at <https://github.com/akamhy/waybackpy/wiki/CLI-docs>.
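A hedged sketch of driving the installed CLI from Python: the `waybackpy` entry point name comes from the setup.cfg section later in this diff, and `--help` is the only flag assumed here (see the wiki above for the real options):

```python
import subprocess

# Run the console script declared in setup.cfg ([options.entry_points]).
# click-based CLIs generate --help automatically; no other flag is assumed.
result = subprocess.run(["waybackpy", "--help"], capture_output=True, text=True)
print(result.stdout)
```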

```python
from waybackpy import get
# retriving the webpage from any url including the archived urls. Don't need to import other libraies :)
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
# supported argumnets are url, encoding and UA
webpage = get("https://example.com/", UA="User-Agent")
print(webpage)
```
> This should print the source code for <https://example.com/>.
## <img src="https://github.githubassets.com/images/icons/emoji/unicode/1f6e1.png" width="20"></img> License

#### Count total archives for an URL using total_archives()
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/akamhy/waybackpy/blob/master/LICENSE)

```diff
+ waybackpy.total_archives(url, UA=user_agent)
```
> url is mandatory. UA is not, but highly recommended.
Copyright (c) 2020-2022 Akash Mahanty Et al.

```python
from waybackpy import total_archives
# retriving the webpage from any url including the archived urls. Don't need to import other libraies :)
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
# supported argumnets are url and UA
count = total_archives("https://en.wikipedia.org/wiki/Python (programming language)", UA="User-Agent")
print(count)
```
> This should print an integer (int), which is the number of total archives on archive.org

## Tests
* [Here](https://github.com/akamhy/waybackpy/tree/master/tests)

## Dependency
* None, just python standard libraries (json, urllib and datetime). Both python 2 and 3 are supported :)


## License

[MIT License](https://github.com/akamhy/waybackpy/blob/master/LICENSE)
Released under the MIT License. See [license](https://github.com/akamhy/waybackpy/blob/master/LICENSE) for details.
@@ -1 +1 @@
theme: jekyll-theme-cayman
theme: jekyll-theme-cayman
14 assets/waybackpy_logo.svg Normal file
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="utf-8"?>
<svg width="711.80188pt" height="258.30469pt" viewBox="0 0 711.80188 258.30469" version="1.1" id="svg2" xmlns="http://www.w3.org/2000/svg">
<g id="surface1" transform="translate(-40.045801,-148)">
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 224.09 309.814 L 224.09 197.997 L 204.768 197.994 L 204.768 312.635 C 204.768 312.635 205.098 312.9 204.105 313.698 C 203.113 314.497 202.408 313.849 202.408 313.849 L 200.518 313.849 L 200.518 197.991 L 181.139 197.991 L 181.139 313.849 L 179.253 313.849 C 179.253 313.849 178.544 314.497 177.551 313.698 C 176.558 312.9 176.888 312.635 176.888 312.635 L 176.888 197.994 L 157.57 197.997 L 157.57 309.814 C 157.57 309.814 156.539 316.772 162.615 321.658 C 168.691 326.546 177.551 326.049 177.551 326.049 L 204.11 326.049 C 204.11 326.049 212.965 326.546 219.041 321.658 C 225.118 316.772 224.09 309.814 224.09 309.814" id="path5"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 253.892 299.821 C 253.892 299.821 253.632 300.965 251.888 300.965 C 250.143 300.965 249.629 299.821 249.629 299.821 L 249.629 278.477 C 249.629 278.477 249.433 278.166 250.078 277.645 C 250.726 277.124 251.243 277.179 251.243 277.179 L 253.892 277.228 Z M 251.588 199.144 C 230.266 199.144 231.071 213.218 231.071 213.218 L 231.071 254.303 L 249.675 254.303 L 249.675 213.69 C 249.675 213.69 249.775 211.276 251.787 211.276 C 253.8 211.276 254 213.542 254 213.542 L 254 265.146 L 246.156 265.146 C 246.156 265.146 240.022 264.579 235.495 268.22 C 230.968 271.858 231.071 276.791 231.071 276.791 L 231.071 298.955 C 231.071 298.955 229.461 308.016 238.914 312.058 C 248.368 316.103 254.805 309.795 254.805 309.795 L 254.805 312.706 L 272.508 312.706 L 272.508 212.895 C 272.508 212.895 272.907 199.144 251.588 199.144" id="path7"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 404.682 318.261 C 404.682 318.261 404.398 319.494 402.485 319.494 C 400.568 319.494 400.001 318.261 400.001 318.261 L 400.001 295.216 C 400.001 295.216 399.786 294.879 400.496 294.315 C 401.208 293.757 401.776 293.812 401.776 293.812 L 404.682 293.868 Z M 402.152 209.568 C 378.728 209.568 379.61 224.761 379.61 224.761 L 379.61 269.117 L 400.051 269.117 L 400.051 225.273 C 400.051 225.273 400.162 222.665 402.374 222.665 C 404.582 222.665 404.805 225.109 404.805 225.109 L 404.805 280.82 L 396.187 280.82 C 396.187 280.82 389.447 280.213 384.475 284.141 C 379.499 288.072 379.61 293.396 379.61 293.396 L 379.61 317.324 C 379.61 317.324 377.843 327.104 388.232 331.469 C 398.616 335.838 405.69 329.027 405.69 329.027 L 405.69 332.169 L 425.133 332.169 L 425.133 224.413 C 425.133 224.413 425.578 209.568 402.152 209.568" id="path9"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 321.114 328.636 L 321.114 206.587 L 302.582 206.587 L 302.582 304.902 C 302.582 304.902 303.211 307.094 300.624 307.094 C 298.035 307.094 298.316 304.902 298.316 304.902 L 298.316 206.587 L 279.784 206.587 C 279.784 206.587 279.922 304.338 279.922 306.756 C 279.922 309.175 280.27 310.526 280.831 312.379 C 281.391 314.238 282.579 318.116 290.901 319.186 C 299.224 320.256 302.44 315.813 302.44 315.813 L 302.44 327.736 C 302.44 327.736 302.862 329.366 300.554 329.366 C 298.246 329.366 298.316 327.849 298.316 327.849 L 298.316 322.957 L 279.642 322.957 L 279.642 327.791 C 279.642 327.791 278.523 341.514 300.274 341.514 C 322.026 341.514 321.114 328.636 321.114 328.636" id="path11"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 352.449 209.811 L 352.449 273.495 C 352.449 277.49 347.911 277.194 347.911 277.194 L 347.911 207.592 C 347.911 207.592 346.929 207.542 349.567 207.542 C 352.817 207.542 352.449 209.811 352.449 209.811 M 352.326 310.393 C 352.326 310.393 352.143 312.366 350.425 312.366 L 348.033 312.366 L 348.033 289.478 L 349.628 289.478 C 349.628 289.478 352.326 289.428 352.326 292.092 Z M 371.341 287.505 C 371.341 284.791 370.727 282.966 368.826 280.993 C 366.925 279.02 363.367 277.441 363.367 277.441 C 363.367 277.441 365.514 276.948 368.704 274.728 C 371.893 272.509 371.525 267.921 371.525 267.921 L 371.525 212.919 C 371.525 212.919 371.801 204.509 366.925 200.587 C 362.049 196.665 352.515 196.363 352.515 196.363 L 328.711 196.363 L 328.711 324.107 L 350.609 324.107 C 360.055 324.107 364.594 322.232 368.336 318.286 C 372.077 314.34 371.341 308.321 371.341 308.321 Z M 371.341 287.505" id="path13"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 452.747 226.744 L 452.747 268.806 L 471.581 268.806 L 471.581 227.459 C 471.581 227.459 471.846 213.532 450.516 213.532 C 429.182 213.532 430.076 227.533 430.076 227.533 L 430.076 313.381 C 430.076 313.381 428.825 327.523 450.872 327.523 C 472.919 327.523 471.401 313.526 471.401 313.526 L 471.401 292.064 L 452.835 292.064 L 452.835 314.389 C 452.835 314.389 452.923 315.61 450.961 315.61 C 448.997 315.61 448.729 314.389 448.729 314.389 L 448.729 226.524 C 448.729 226.524 448.821 225.378 450.692 225.378 C 452.566 225.378 452.747 226.744 452.747 226.744" id="path15"/>
<path style="fill: rgb(171, 46, 51); fill-opacity: 1; fill-rule: nonzero; stroke: none;" d="M 520.624 281.841 C 517.672 278.98 514.317 277.904 514.317 277.904 C 514.317 277.904 517.538 277.796 520.489 274.775 C 523.442 271.753 523.173 267.924 523.173 267.924 L 523.173 208.211 L 503.185 208.211 L 503.185 276.014 C 503.185 276.014 503.185 277.361 501.172 277.361 L 498.761 277.309 L 498.761 191.655 L 478.973 191.655 L 478.973 327.905 L 498.692 327.905 L 498.692 290.039 L 501.709 290.039 C 501.709 290.039 502.112 290.039 502.648 290.523 C 503.185 291.01 503.185 291.602 503.185 291.602 L 503.185 327.905 L 523.307 327.905 L 523.307 288.636 C 523.307 288.636 523.576 284.699 520.624 281.841" id="path17"/>
<path style="fill-opacity: 1; fill-rule: nonzero; stroke: none; fill: rgb(255, 222, 87);" d="M 638.021 327.182 L 638.021 205.132 L 619.489 205.132 L 619.489 303.448 C 619.489 303.448 620.119 305.64 617.53 305.64 C 614.944 305.64 615.223 303.448 615.223 303.448 L 615.223 205.132 L 596.692 205.132 C 596.692 205.132 596.83 302.884 596.83 305.301 C 596.83 307.721 597.178 309.071 597.738 310.924 C 598.299 312.784 599.487 316.662 607.809 317.732 C 616.132 318.802 619.349 314.359 619.349 314.359 L 619.349 326.281 C 619.349 326.281 619.77 327.913 617.462 327.913 C 615.154 327.913 615.223 326.396 615.223 326.396 L 615.223 321.502 L 596.55 321.502 L 596.55 326.336 C 596.55 326.336 595.43 340.059 617.182 340.059 C 638.934 340.059 638.021 327.182 638.021 327.182" id="path-1"/>
<path d="M 592.159 233.846 C 593.222 238.576 593.75 243.873 593.745 249.735 C 593.74 255.598 593.135 261.281 591.931 266.782 C 590.726 272.285 588.901 277.144 586.453 281.361 C 584.006 285.578 580.938 288.946 577.248 291.466 C 573.559 293.985 569.226 295.246 564.25 295.246 C 561.585 295.246 559.008 294.936 556.521 294.32 C 554.033 293.703 551.813 292.854 549.859 291.774 C 547.905 290.694 546.284 289.512 544.997 288.226 C 543.71 286.94 542.934 285.578 542.668 284.138 L 542.629 328.722 L 526.369 328.722 L 526.475 207.466 L 541.003 207.466 L 542.728 216.259 C 544.507 213.38 547.197 211.065 550.797 209.317 C 554.397 207.568 558.374 206.694 562.728 206.694 C 565.66 206.694 568.637 207.157 571.657 208.083 C 574.677 209.008 577.497 210.551 580.116 212.711 C 582.735 214.871 585.11 217.698 587.239 221.196 C 589.369 224.692 591.009 228.909 592.159 233.846 Z M 558.932 280.744 C 561.597 280.744 564.019 279.972 566.197 278.429 C 568.376 276.887 570.243 274.804 571.801 272.182 C 573.358 269.559 574.582 266.423 575.474 262.772 C 576.366 259.121 576.814 255.238 576.817 251.124 C 576.821 247.113 576.424 243.307 575.628 239.708 C 574.831 236.108 573.701 232.92 572.237 230.143 C 570.774 227.366 568.999 225.155 566.912 223.51 C 564.825 221.864 562.405 221.041 559.65 221.041 C 556.985 221.041 554.54 221.813 552.318 223.356 C 550.095 224.898 548.183 226.981 546.581 229.603 C 544.98 232.226 543.755 235.311 542.908 238.86 C 542.061 242.408 541.635 246.239 541.632 250.353 C 541.628 254.466 542.002 258.349 542.754 262 C 543.506 265.651 544.637 268.865 546.145 271.642 C 547.653 274.419 549.472 276.63 551.603 278.276 C 553.734 279.922 556.177 280.744 558.932 280.744 Z" style="fill: rgb(69, 132, 182); white-space: pre;"/>
</g>
</svg>
After: 8.3 KiB
232 index.rst
@@ -1,232 +0,0 @@
|
||||
waybackpy
|
||||
=========
|
||||
|
||||
|Build Status| |Downloads| |Release| |Codacy Badge| |License: MIT|
|
||||
|Maintainability| |CodeFactor| |made-with-python| |pypi| |PyPI - Python
|
||||
Version| |Maintenance|
|
||||
|
||||
.. |Build Status| image:: https://travis-ci.org/akamhy/waybackpy.svg?branch=master
|
||||
:target: https://travis-ci.org/akamhy/waybackpy
|
||||
.. |Downloads| image:: https://img.shields.io/pypi/dm/waybackpy.svg
|
||||
:target: https://pypistats.org/packages/waybackpy
|
||||
.. |Release| image:: https://img.shields.io/github/v/release/akamhy/waybackpy.svg
|
||||
:target: https://github.com/akamhy/waybackpy/releases
|
||||
.. |Codacy Badge| image:: https://api.codacy.com/project/badge/Grade/255459cede9341e39436ec8866d3fb65
|
||||
:target: https://www.codacy.com/manual/akamhy/waybackpy?utm_source=github.com&utm_medium=referral&utm_content=akamhy/waybackpy&utm_campaign=Badge_Grade
|
||||
.. |License: MIT| image:: https://img.shields.io/badge/License-MIT-yellow.svg
|
||||
:target: https://github.com/akamhy/waybackpy/blob/master/LICENSE
|
||||
.. |Maintainability| image:: https://api.codeclimate.com/v1/badges/942f13d8177a56c1c906/maintainability
|
||||
:target: https://codeclimate.com/github/akamhy/waybackpy/maintainability
|
||||
.. |CodeFactor| image:: https://www.codefactor.io/repository/github/akamhy/waybackpy/badge
|
||||
:target: https://www.codefactor.io/repository/github/akamhy/waybackpy
|
||||
.. |made-with-python| image:: https://img.shields.io/badge/Made%20with-Python-1f425f.svg
|
||||
:target: https://www.python.org/
|
||||
.. |pypi| image:: https://img.shields.io/pypi/v/wayback.svg
|
||||
.. |PyPI - Python Version| image:: https://img.shields.io/pypi/pyversions/waybackpy?style=flat-square
|
||||
.. |Maintenance| image:: https://img.shields.io/badge/Maintained%3F-yes-green.svg
|
||||
:target: https://github.com/akamhy/waybackpy/graphs/commit-activity
|
||||
|
||||
|Internet Archive| |Wayback Machine|
|
||||
|
||||
The waybackpy is a python wrapper for `Internet Archive`_\ ’s `Wayback
|
||||
Machine`_.
|
||||
|
||||
.. _Internet Archive: https://en.wikipedia.org/wiki/Internet_Archive
|
||||
.. _Wayback Machine: https://en.wikipedia.org/wiki/Wayback_Machine
|
||||
|
||||
.. |Internet Archive| image:: https://upload.wikimedia.org/wikipedia/commons/thumb/8/84/Internet_Archive_logo_and_wordmark.svg/84px-Internet_Archive_logo_and_wordmark.svg.png
|
||||
.. |Wayback Machine| image:: https://upload.wikimedia.org/wikipedia/commons/thumb/0/01/Wayback_Machine_logo_2010.svg/284px-Wayback_Machine_logo_2010.svg.png
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Using `pip`_:
|
||||
|
||||
**pip install waybackpy**
|
||||
|
||||
.. _pip: https://en.wikipedia.org/wiki/Pip_(package_manager)
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Archiving aka Saving an url Using save()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.save(url, UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandatory. UA is not, but highly recommended.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import waybackpy
|
||||
# Capturing a new archive on Wayback machine.
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
archived_url = waybackpy.save("https://github.com/akamhy/waybackpy", UA = "Any-User-Agent")
|
||||
print(archived_url)
|
||||
|
||||
This should print something similar to the following archived URL:
|
||||
|
||||
https://web.archive.org/web/20200504141153/https://github.com/akamhy/waybackpy
|
||||
|
||||
Receiving the oldest archive for an URL Using oldest()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.oldest(url, UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandatory. UA is not, but highly recommended.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import waybackpy
|
||||
# retrieving the oldest archive on Wayback machine.
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
oldest_archive = waybackpy.oldest("https://www.google.com/", UA = "Any-User-Agent")
|
||||
print(oldest_archive)
|
||||
|
||||
This returns the oldest available archive for https://google.com.
|
||||
|
||||
http://web.archive.org/web/19981111184551/http://google.com:80/
|
||||
|
||||
Receiving the newest archive for an URL using newest()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.newest(url, UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandatory. UA is not, but highly recommended.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import waybackpy
|
||||
# retrieving the newest archive on Wayback machine.
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
newest_archive = waybackpy.newest("https://www.microsoft.com/en-us", UA = "Any-User-Agent")
|
||||
print(newest_archive)
|
||||
|
||||
This returns the newest available archive for
|
||||
https://www.microsoft.com/en-us, something just like this:
|
||||
|
||||
http://web.archive.org/web/20200429033402/https://www.microsoft.com/en-us/
|
||||
|
||||
Receiving archive close to a specified year, month, day, hour, and minute using near()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.near(url, year=2020, month=1, day=1, hour=1, minute=1, UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandotory. year,month,day,hour and minute are optional
|
||||
arguments. UA is not mandotory, but higly recomended.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import waybackpy
|
||||
# retriving the the closest archive from a specified year.
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
# supported argumnets are year,month,day,hour and minute
|
||||
archive_near_year = waybackpy.near("https://www.facebook.com/", year=2010, UA ="Any-User-Agent")
|
||||
print(archive_near_year)
|
||||
|
||||
returns :
|
||||
http://web.archive.org/web/20100504071154/http://www.facebook.com/
|
||||
|
||||
``waybackpy.near("https://www.facebook.com/", year=2010, month=1, UA ="Any-User-Agent")``
|
||||
returns:
|
||||
http://web.archive.org/web/20101111173430/http://www.facebook.com//
|
||||
|
||||
``waybackpy.near("https://www.oracle.com/index.html", year=2019, month=1, day=5, UA ="Any-User-Agent")``
|
||||
returns:
|
||||
http://web.archive.org/web/20190105054437/https://www.oracle.com/index.html
|
||||
> Please note that if you only specify the year, the current month and
|
||||
day are default arguments for month and day respectively. Do not expect
|
||||
just putting the year parameter would return the archive closer to
|
||||
January but the current month you are using the package. If you are
|
||||
using it in July 2018 and let’s say you use
|
||||
``waybackpy.near("https://www.facebook.com/", year=2011, UA ="Any-User-Agent")``
|
||||
then you would be returned the nearest archive to July 2011 and not
|
||||
January 2011. You need to specify the month “1” for January.
|
||||
|
||||
Do not pad (don’t use zeros in the month, year, day, minute, and hour
|
||||
arguments). e.g. For January, set month = 1 and not month = 01.
|
||||
|
||||
Get the content of webpage using get()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.get(url, encoding="UTF-8", UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandatory. UA is not, but highly recommended. encoding is
|
||||
detected automatically, don’t specify unless necessary.
|
||||
|
||||
.. code:: python
|
||||
|
||||
from waybackpy import get
|
||||
# retriving the webpage from any url including the archived urls. Don't need to import other libraies :)
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
# supported argumnets are url, encoding and UA
|
||||
webpage = get("https://example.com/", UA="User-Agent")
|
||||
print(webpage)
|
||||
|
||||
..
|
||||
|
||||
This should print the source code for https://example.com/.
|
||||
|
||||
Count total archives for an URL using total_archives()
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: diff
|
||||
|
||||
+ waybackpy.total_archives(url, UA=user_agent)
|
||||
|
||||
..
|
||||
|
||||
url is mandatory. UA is not, but highly recommended.
|
||||
|
||||
.. code:: python
|
||||
|
||||
from waybackpy import total_archives
|
||||
# retriving the webpage from any url including the archived urls. Don't need to import other libraies :)
|
||||
# Default user-agent (UA) is "waybackpy python package", if not specified in the call.
|
||||
# supported argumnets are url and UA
|
||||
count = total_archives("https://en.wikipedia.org/wiki/Python (programming language)", UA="User-Agent")
|
||||
print(count)
|
||||
|
||||
..
|
||||
|
||||
This should print an integer (int), which is the number of total
|
||||
archives on archive.org

Tests
-----

- `Here`_

Dependency
----------

- None, just Python standard libraries (json, urllib and datetime).
  Both Python 2 and 3 are supported :)

License
-------

`MIT License`_

.. _Here: https://github.com/akamhy/waybackpy/tree/master/tests
.. _MIT License: https://github.com/akamhy/waybackpy/blob/master/LICENSE
3  pyproject.toml  Normal file
@@ -0,0 +1,3 @@
[build-system]
requires = ["wheel", "setuptools"]
build-backend = "setuptools.build_meta"
10  requirements-dev.txt  Normal file
@@ -0,0 +1,10 @@
black
click
codecov
flake8
mypy
pytest
pytest-cov
requests
setuptools>=46.4.0
types-requests
3  requirements.txt  Normal file
@@ -0,0 +1,3 @@
click
requests
urllib3
98  setup.cfg
@@ -1,3 +1,97 @@
[metadata]
description-file = README.md
license_file = LICENSE
name = waybackpy
version = attr: waybackpy.__version__
description = attr: waybackpy.__description__
long_description = file: README.md
long_description_content_type = text/markdown
license = attr: waybackpy.__license__
author = attr: waybackpy.__author__
author_email = attr: waybackpy.__author_email__
url = attr: waybackpy.__url__
download_url = attr: waybackpy.__download_url__
project_urls =
    Documentation = https://github.com/akamhy/waybackpy/wiki
    Source = https://github.com/akamhy/waybackpy
    Tracker = https://github.com/akamhy/waybackpy/issues
keywords =
    Archive Website
    Wayback Machine
    Internet Archive
    Wayback Machine CLI
    Wayback Machine Python
    Internet Archiving
    Availability API
    CDX API
    savepagenow
classifiers =
    Development Status :: 5 - Production/Stable
    Intended Audience :: Developers
    Intended Audience :: End Users/Desktop
    Natural Language :: English
    Typing :: Typed
    License :: OSI Approved :: MIT License
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: Implementation :: CPython

[options]
packages = find:
python_requires = >= 3.7
install_requires =
    click
    requests
    urllib3

[options.extras_require]
dev =
    black
    codecov
    flake8
    mypy
    pytest
    pytest-cov
    setuptools>=46.4.0
    types-requests

[options.entry_points]
console_scripts =
    waybackpy = waybackpy.cli:main

[isort]
profile = black

[flake8]
indent-size = 4
max-line-length = 88
extend-ignore = W503,W605
exclude =
    venv
    __pycache__
    .venv
    ./env
    venv/
    env
    .env
    ./build

[mypy]
python_version = 3.9
show_error_codes = True
pretty = True
strict = True

[tool:pytest]
addopts =
    # show summary of all tests that did not pass
    -ra
    # enable all warnings
    -Wd
    # coverage and html report
    --cov=waybackpy
    --cov-report=html
testpaths =
    tests
48  setup.py
@@ -1,49 +1,3 @@
import os.path
from setuptools import setup

with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    long_description = f.read()

about = {}
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'waybackpy', '__version__.py'), 'r', 'utf-8') as f:
    exec(f.read(), about)

setup(
    name = about['__title__'],
    packages = ['waybackpy'],
    version = about['__version__'],
    description = about['__description__'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    license= about['__license__'],
    author = about['__author__'],
    author_email = about['__author_email__'],
    url = about['__url__'],
    download_url = 'https://github.com/akamhy/waybackpy/archive/v1.4.tar.gz',
    keywords = ['wayback', 'archive', 'archive website', 'wayback machine', 'Internet Archive'],
    install_requires=[],
    python_requires= ">=2.7",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    project_urls={
        'Documentation': 'https://waybackpy.readthedocs.io',
        'Source': 'https://github.com/akamhy/waybackpy',
    },
)
setup()
23  snapcraft.yaml  Normal file
@@ -0,0 +1,23 @@
name: waybackpy
summary: Wayback Machine API interface and a command-line tool
description: |
  Waybackpy is a CLI tool that interfaces with the Wayback Machine APIs.
  The Wayback Machine has three client-side public APIs: the Save API,
  the Availability API and the CDX API. All three APIs can be accessed
  via waybackpy from the terminal.
version: git
grade: stable
confinement: strict
base: core20
architectures:
  - build-on: [arm64, armhf, amd64]

apps:
  waybackpy:
    command: bin/waybackpy
    plugs: [home, network, network-bind, removable-media]

parts:
  waybackpy:
    plugin: python
    source: https://github.com/akamhy/waybackpy.git
0  tests/__init__.py  Normal file
@@ -1,98 +0,0 @@
import sys
sys.path.append("..")
import waybackpy
import pytest


user_agent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0"


def test_clean_url():
    test_url = " https://en.wikipedia.org/wiki/Network security "
    answer = "https://en.wikipedia.org/wiki/Network_security"
    test_result = waybackpy.clean_url(test_url)
    assert answer == test_result


def test_url_check():
    InvalidUrl = "http://wwwgooglecom/"
    with pytest.raises(Exception) as e_info:
        waybackpy.url_check(InvalidUrl)


def test_save():
    # Test for urls that exist and can be archived.
    url1 = "https://github.com/akamhy/waybackpy"
    archived_url1 = waybackpy.save(url1, UA=user_agent)
    assert url1 in archived_url1

    # Test for urls that are incorrect.
    with pytest.raises(Exception) as e_info:
        url2 = "ha ha ha ha"
        waybackpy.save(url2, UA=user_agent)

    # Test for urls not allowed to be archived by robots.txt.
    with pytest.raises(Exception) as e_info:
        url3 = "http://www.archive.is/faq.html"
        waybackpy.save(url3, UA=user_agent)

    # Test for non-existent urls.
    with pytest.raises(Exception) as e_info:
        url4 = "https://githfgdhshajagjstgeths537agajaajgsagudadhuss8762346887adsiugujsdgahub.us"
        archived_url4 = waybackpy.save(url4, UA=user_agent)


def test_near():
    url = "google.com"
    archive_near_year = waybackpy.near(url, year=2010, UA=user_agent)
    assert "2010" in archive_near_year

    archive_near_month_year = waybackpy.near(url, year=2015, month=2, UA=user_agent)
    assert ("201502" in archive_near_month_year) or ("201501" in archive_near_month_year) or ("201503" in archive_near_month_year)

    archive_near_day_month_year = waybackpy.near(url, year=2006, month=11, day=15, UA=user_agent)
    assert ("20061114" in archive_near_day_month_year) or ("20061115" in archive_near_day_month_year) or ("20061116" in archive_near_day_month_year)

    archive_near_hour_day_month_year = waybackpy.near("www.python.org", year=2008, month=5, day=9, hour=15, UA=user_agent)
    assert ("2008050915" in archive_near_hour_day_month_year) or ("2008050914" in archive_near_hour_day_month_year) or ("2008050913" in archive_near_hour_day_month_year)

    with pytest.raises(Exception) as e_info:
        NeverArchivedUrl = "https://ee_3n.wrihkeipef4edia.org/rwti5r_ki/Nertr6w_rork_rse7c_urity"
        waybackpy.near(NeverArchivedUrl, year=2010, UA=user_agent)


def test_oldest():
    url = "github.com/akamhy/waybackpy"
    archive_oldest = waybackpy.oldest(url, UA=user_agent)
    assert "20200504141153" in archive_oldest


def test_newest():
    url = "github.com/akamhy/waybackpy"
    archive_newest = waybackpy.newest(url, UA=user_agent)
    assert url in archive_newest


def test_get():
    oldest_google_archive = waybackpy.oldest("google.com", UA=user_agent)
    oldest_google_page_text = waybackpy.get(oldest_google_archive, UA=user_agent)
    assert "Welcome to Google" in oldest_google_page_text


def test_total_archives():
    count1 = waybackpy.total_archives("https://en.wikipedia.org/wiki/Python (programming language)", UA=user_agent)
    assert count1 > 2000

    count2 = waybackpy.total_archives("https://gaha.e4i3n.m5iai3kip6ied.cima/gahh2718gs/ahkst63t7gad8", UA=user_agent)
    assert count2 == 0


if __name__ == "__main__":
    test_clean_url()
    print(".")
    test_url_check()
    print(".")
    test_get()
    print(".")
    test_near()
    print(".")
    test_newest()
    print(".")
    test_save()
    print(".")
    test_oldest()
    print(".")
    test_total_archives()
    print(".")
113  tests/test_availability_api.py  Normal file
@@ -0,0 +1,113 @@
import random
import string
from datetime import datetime, timedelta

import pytest

from waybackpy.availability_api import WaybackMachineAvailabilityAPI
from waybackpy.exceptions import (
    ArchiveNotInAvailabilityAPIResponse,
    InvalidJSONInAvailabilityAPIResponse,
)

now = datetime.utcnow()
url = "https://example.com/"
user_agent = (
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
)


def rndstr(n: int) -> str:
    return "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(n)
    )


def test_oldest() -> None:
    """
    Test the oldest archive of example.com and also check the attributes.
    """
    url = "https://example.com/"
    user_agent = (
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
    )
    availability_api = WaybackMachineAvailabilityAPI(url, user_agent)
    oldest = availability_api.oldest()
    oldest_archive_url = oldest.archive_url
    assert "2002" in oldest_archive_url
    oldest_timestamp = oldest.timestamp()
    assert abs(oldest_timestamp - now) > timedelta(days=7000)  # more than 19 years
    assert (
        availability_api.json is not None
        and availability_api.json["archived_snapshots"]["closest"]["available"] is True
    )
    assert repr(oldest).find("example.com") != -1
    assert "2002" in str(oldest)


def test_newest() -> None:
    """
    Assuming that the most recent YouTube archive was made no earlier
    than three days ago (86400 seconds per day).
    """
    url = "https://www.youtube.com/"
    user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:96.0) Gecko/20100101 Firefox/96.0"
    availability_api = WaybackMachineAvailabilityAPI(url, user_agent)
    newest = availability_api.newest()
    newest_timestamp = newest.timestamp()
    # betting in favor that the latest YouTube archive was made within the
    # last 3 days; high-traffic sites like YouTube are archived many times
    # a day, so this seems very reasonable.
    assert abs(newest_timestamp - now) < timedelta(seconds=86400 * 3)


def test_invalid_json() -> None:
    """
    When the API is malfunctioning or we don't pass a URL,
    it may return invalid JSON data.
    """
    with pytest.raises(InvalidJSONInAvailabilityAPIResponse):
        availability_api = WaybackMachineAvailabilityAPI(url="", user_agent=user_agent)
        _ = availability_api.archive_url


def test_no_archive() -> None:
    """
    ArchiveNotInAvailabilityAPIResponse may be raised if the Wayback Machine
    did not reply with an archive even though we know the site has millions
    of archives. The reason for this weird behavior is unknown.

    This exception is also raised if there really are no archives for the
    passed URL.
    """
    with pytest.raises(ArchiveNotInAvailabilityAPIResponse):
        availability_api = WaybackMachineAvailabilityAPI(
            url=f"https://{rndstr(30)}.cn", user_agent=user_agent
        )
        _ = availability_api.archive_url


def test_no_api_call_str_repr() -> None:
    """
    Some users may want to see the string representation
    before making any API requests.

    str() must not return None, so we return "".
    """
    availability_api = WaybackMachineAvailabilityAPI(
        url=f"https://{rndstr(30)}.gov", user_agent=user_agent
    )
    assert str(availability_api) == ""


def test_no_call_timestamp() -> None:
    """
    If no API requests were made, the bound timestamp() method returns
    datetime.max as a default value.
    """
    availability_api = WaybackMachineAvailabilityAPI(
        url=f"https://{rndstr(30)}.in", user_agent=user_agent
    )
    assert datetime.max == availability_api.timestamp()
42  tests/test_cdx_api.py  Normal file
@@ -0,0 +1,42 @@
from waybackpy.cdx_api import WaybackMachineCDXServerAPI


def test_a() -> None:
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    url = "https://twitter.com/jack"

    wayback = WaybackMachineCDXServerAPI(
        url=url,
        user_agent=user_agent,
        match_type="prefix",
        collapses=["urlkey"],
        start_timestamp="201001",
        end_timestamp="201002",
    )
    # timeframe-bound prefix matching along with urlkey-based collapsing

    snapshots = wayback.snapshots()  # <class 'generator'>

    for snapshot in snapshots:
        assert snapshot.timestamp.startswith("2010")


def test_b() -> None:
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) "
        "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    url = "https://www.google.com"

    wayback = WaybackMachineCDXServerAPI(
        url=url, user_agent=user_agent, start_timestamp="202101", end_timestamp="202112"
    )
    # timeframe-bound query, no prefix matching or collapsing this time

    snapshots = wayback.snapshots()  # <class 'generator'>

    for snapshot in snapshots:
        assert snapshot.timestamp.startswith("2021")
43  tests/test_cdx_snapshot.py  Normal file
@@ -0,0 +1,43 @@
from datetime import datetime

from waybackpy.cdx_snapshot import CDXSnapshot


def test_CDXSnapshot() -> None:
    sample_input = (
        "org,archive)/ 20080126045828 http://github.com "
        "text/html 200 Q4YULN754FHV2U6Q5JUT6Q2P57WEWNNY 1415"
    )
    prop_values = sample_input.split(" ")
    properties = {}
    (
        properties["urlkey"],
        properties["timestamp"],
        properties["original"],
        properties["mimetype"],
        properties["statuscode"],
        properties["digest"],
        properties["length"],
    ) = prop_values

    snapshot = CDXSnapshot(properties)

    assert properties["urlkey"] == snapshot.urlkey
    assert properties["timestamp"] == snapshot.timestamp
    assert properties["original"] == snapshot.original
    assert properties["mimetype"] == snapshot.mimetype
    assert properties["statuscode"] == snapshot.statuscode
    assert properties["digest"] == snapshot.digest
    assert properties["length"] == snapshot.length
    assert (
        datetime.strptime(properties["timestamp"], "%Y%m%d%H%M%S")
        == snapshot.datetime_timestamp
    )
    archive_url = (
        "https://web.archive.org/web/"
        + properties["timestamp"]
        + "/"
        + properties["original"]
    )
    assert archive_url == snapshot.archive_url
    assert sample_input == str(snapshot)
103  tests/test_cdx_utils.py  Normal file
@@ -0,0 +1,103 @@
from typing import Any, Dict, List

import pytest

from waybackpy.cdx_utils import (
    check_collapses,
    check_filters,
    check_match_type,
    full_url,
    get_response,
    get_total_pages,
)
from waybackpy.exceptions import WaybackError


def test_get_total_pages() -> None:
    url = "twitter.com"
    user_agent = (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.0.2 Safari/605.1.15"
    )
    assert get_total_pages(url=url, user_agent=user_agent) >= 56


def test_full_url() -> None:
    endpoint = "https://web.archive.org/cdx/search/cdx"
    params: Dict[str, Any] = {}
    assert endpoint == full_url(endpoint, params)

    params = {"a": "1"}
    assert full_url(endpoint, params) == "https://web.archive.org/cdx/search/cdx?a=1"
    assert (
        full_url(endpoint + "?", params) == "https://web.archive.org/cdx/search/cdx?a=1"
    )

    params["b"] = 2
    assert (
        full_url(endpoint + "?", params)
        == "https://web.archive.org/cdx/search/cdx?a=1&b=2"
    )

    params["c"] = "foo bar"
    assert (
        full_url(endpoint + "?", params)
        == "https://web.archive.org/cdx/search/cdx?a=1&b=2&c=foo%20bar"
    )


def test_get_response() -> None:
    url = "https://github.com"
    user_agent = (
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"
    )
    headers = {"User-Agent": str(user_agent)}
    response = get_response(url, headers=headers)
    assert not isinstance(response, Exception) and response.status_code == 200


def test_check_filters() -> None:
    filters: List[str] = []
    check_filters(filters)

    filters = ["statuscode:200", "timestamp:20215678901234", "original:https://url.com"]
    check_filters(filters)

    with pytest.raises(WaybackError):
        check_filters("not-list")  # type: ignore[arg-type]

    with pytest.raises(WaybackError):
        check_filters(["invalid"])


def test_check_collapses() -> None:
    collapses: List[str] = []
    check_collapses(collapses)

    collapses = ["timestamp:10"]
    check_collapses(collapses)

    collapses = ["urlkey"]
    check_collapses(collapses)

    collapses = "urlkey"  # type: ignore[assignment]
    with pytest.raises(WaybackError):
        check_collapses(collapses)

    collapses = ["also illegal collapse"]
    with pytest.raises(WaybackError):
        check_collapses(collapses)


def test_check_match_type() -> None:
    assert check_match_type(None, "url")
    match_type = "exact"
    url = "test_url"
    assert check_match_type(match_type, url)

    url = "has * in it"
    with pytest.raises(WaybackError):
        check_match_type("domain", url)

    with pytest.raises(WaybackError):
        check_match_type("not a valid type", "url")
169  tests/test_cli.py  Normal file
@@ -0,0 +1,169 @@
import requests
from click.testing import CliRunner

from waybackpy import __version__
from waybackpy.cli import main


def test_oldest() -> None:
    runner = CliRunner()
    result = runner.invoke(main, ["--url", " https://github.com ", "--oldest"])
    assert result.exit_code == 0
    assert (
        result.output
        == "Archive URL:\nhttps://web.archive.org/web/2008051421\
0148/http://github.com/\n"
    )


def test_near() -> None:
    runner = CliRunner()
    result = runner.invoke(
        main,
        [
            "--url",
            " https://facebook.com ",
            "--near",
            "--year",
            "2010",
            "--month",
            "5",
            "--day",
            "10",
            "--hour",
            "6",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output
        == "Archive URL:\nhttps://web.archive.org/web/2010051008\
2647/http://www.facebook.com/\n"
    )


def test_json() -> None:
    runner = CliRunner()
    result = runner.invoke(
        main,
        [
            "--url",
            " https://apple.com ",
            "--near",
            "--year",
            "2010",
            "--month",
            "2",
            "--day",
            "8",
            "--hour",
            "12",
            "--json",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            """Archive URL:\nhttps://web.archive.org/web/2010020812\
5854/http://www.apple.com/\nJSON respons\
e:\n{"url": "https://apple.com", "archived_snapshots": {"close\
st": {"status": "200", "available": true, "url": "http://web.ar\
chive.org/web/20100208125854/http://www.apple.com/", "timest\
amp": "20100208125854"}}, "timestamp":"""
        )
        != -1
    )


def test_newest() -> None:
    runner = CliRunner()
    result = runner.invoke(main, ["--url", " https://microsoft.com ", "--newest"])
    assert result.exit_code == 0
    assert (
        result.output.find("microsoft.com") != -1
        and result.output.find("Archive URL:\n") != -1
    )


def test_cdx() -> None:
    runner = CliRunner()
    result = runner.invoke(
        main,
        "--url https://twitter.com/jack --cdx --user-agent some-user-agent \
--start-timestamp 2010 --end-timestamp 2012 --collapse urlkey \
--match-type prefix --cdx-print archiveurl --cdx-print length \
--cdx-print digest --cdx-print statuscode --cdx-print mimetype \
--cdx-print original --cdx-print timestamp --cdx-print urlkey".split(
            " "
        ),
    )
    assert result.exit_code == 0
    assert result.output.count("\n") > 3000


def test_save() -> None:
    runner = CliRunner()
    result = runner.invoke(
        main,
        "--url https://yahoo.com --user_agent my-unique-user-agent \
--save --headers".split(
            " "
        ),
    )
    assert result.exit_code == 0
    assert result.output.find("Archive URL:") != -1
    assert (result.output.find("Cached save:\nTrue") != -1) or (
        result.output.find("Cached save:\nFalse") != -1
    )
    assert result.output.find("Save API headers:\n") != -1
    assert result.output.find("yahoo.com") != -1


def test_version() -> None:
    runner = CliRunner()
    result = runner.invoke(main, ["--version"])
    assert result.exit_code == 0
    assert result.output == f"waybackpy version {__version__}\n"


def test_license() -> None:
    runner = CliRunner()
    result = runner.invoke(main, ["--license"])
    assert result.exit_code == 0
    assert (
        result.output
        == requests.get(
            url="https://raw.githubusercontent.com/akamhy/waybackpy/master/LICENSE"
        ).text
        + "\n"
    )


def test_only_url() -> None:
    runner = CliRunner()
    result = runner.invoke(main, ["--url", "https://google.com"])
    assert result.exit_code == 0
    assert (
        result.output
        == "Only URL passed, but did not specify what to do with the URL. Use \
--help flag for help using waybackpy.\n"
    )


def test_known_url() -> None:
    # with file generator enabled
    runner = CliRunner()
    result = runner.invoke(
        main, ["--url", "https://akamhy.github.io", "--known-urls", "--file"]
    )
    assert result.exit_code == 0
    assert result.output.count("\n") > 40
    assert result.output.count("akamhy.github.io") > 40
    assert result.output.find("in the current working directory.\n") != -1

    # without file
    runner = CliRunner()
    result = runner.invoke(main, ["--url", "https://akamhy.github.io", "--known-urls"])
    assert result.exit_code == 0
    assert result.output.count("\n") > 40
    assert result.output.count("akamhy.github.io") > 40
222  tests/test_save_api.py  Normal file
@@ -0,0 +1,222 @@
import random
import string
import time
from datetime import datetime
from typing import cast

import pytest
from requests.structures import CaseInsensitiveDict

from waybackpy.exceptions import MaximumSaveRetriesExceeded
from waybackpy.save_api import WaybackMachineSaveAPI


def rndstr(n: int) -> str:
    return "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(n)
    )


def test_save() -> None:
    url = "https://github.com/akamhy/waybackpy"
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    save_api = WaybackMachineSaveAPI(url, user_agent)
    save_api.save()
    archive_url = save_api.archive_url
    timestamp = save_api.timestamp()
    headers = save_api.headers  # CaseInsensitiveDict
    cached_save = save_api.cached_save
    assert cached_save in [True, False]
    assert archive_url.find("github.com/akamhy/waybackpy") != -1
    assert timestamp is not None
    assert str(headers).find("github.com/akamhy/waybackpy") != -1
    assert isinstance(save_api.timestamp(), datetime)


def test_max_redirect_exceeded() -> None:
    with pytest.raises(MaximumSaveRetriesExceeded):
        url = f"https://{rndstr(30)}.gov"
        user_agent = (
            "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
            "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
        )
        save_api = WaybackMachineSaveAPI(url, user_agent, max_tries=3)
        save_api.save()


def test_sleep() -> None:
    """
    Sleeping is actually very important for SaveAPI
    interface stability.
    The test checks that the time taken by the sleep method
    is as intended.
    """
    url = "https://example.com"
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    save_api = WaybackMachineSaveAPI(url, user_agent)
    s_time = int(time.time())
    save_api.sleep(6)  # a multiple of 3, sleeps for 10 seconds
    e_time = int(time.time())
    assert (e_time - s_time) >= 10

    s_time = int(time.time())
    save_api.sleep(7)  # not a multiple of 3, sleeps for 5 seconds
    e_time = int(time.time())
    assert (e_time - s_time) >= 5


def test_timestamp() -> None:
    url = "https://example.com"
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    save_api = WaybackMachineSaveAPI(url, user_agent)
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    save_api._archive_url = f"https://web.archive.org/web/{now}/{url}/"
    save_api.timestamp()
    assert save_api.cached_save is False
    now = "20100124063622"
    save_api._archive_url = f"https://web.archive.org/web/{now}/{url}/"
    save_api.timestamp()
    assert save_api.cached_save is True


def test_archive_url_parser() -> None:
    """
    Tests three regexes for matches and also tests the response URL.
    """
    url = "https://example.com"
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    save_api = WaybackMachineSaveAPI(url, user_agent)

    h = (
        "\nSTART\nContent-Location: "
        "/web/20201126185327/https://www.scribbr.com/citing-sources/et-al"
        "\nEND\n"
    )
    save_api.headers = h  # type: ignore[assignment]

    expected_url = (
        "https://web.archive.org/web/20201126185327/"
        "https://www.scribbr.com/citing-sources/et-al"
    )
    assert save_api.archive_url_parser() == expected_url

    headers = {
        "Server": "nginx/1.15.8",
        "Date": "Sat, 02 Jan 2021 09:40:25 GMT",
        "Content-Type": "text/html; charset=UTF-8",
        "Transfer-Encoding": "chunked",
        "Connection": "keep-alive",
        "X-Archive-Orig-Server": "nginx",
        "X-Archive-Orig-Date": "Sat, 02 Jan 2021 09:40:09 GMT",
        "X-Archive-Orig-Transfer-Encoding": "chunked",
        "X-Archive-Orig-Connection": "keep-alive",
        "X-Archive-Orig-Vary": "Accept-Encoding",
        "X-Archive-Orig-Last-Modified": "Fri, 01 Jan 2021 12:19:00 GMT",
        "X-Archive-Orig-Strict-Transport-Security": "max-age=31536000, max-age=0;",
        "X-Archive-Guessed-Content-Type": "text/html",
        "X-Archive-Guessed-Charset": "utf-8",
        "Memento-Datetime": "Sat, 02 Jan 2021 09:40:09 GMT",
        "Link": (
            '<https://www.scribbr.com/citing-sources/et-al/>; rel="original", '
            "<https://web.archive.org/web/timemap/link/https://www.scribbr.com/"
            'citing-sources/et-al/>; rel="timemap"; type="application/link-format", '
            "<https://web.archive.org/web/https://www.scribbr.com/citing-sources/"
            'et-al/>; rel="timegate", <https://web.archive.org/web/20200601082911/'
            'https://www.scribbr.com/citing-sources/et-al/>; rel="first memento"; '
            'datetime="Mon, 01 Jun 2020 08:29:11 GMT", <https://web.archive.org/web/'
            "20201126185327/https://www.scribbr.com/citing-sources/et-al/>; "
            'rel="prev memento"; datetime="Thu, 26 Nov 2020 18:53:27 GMT", '
            "<https://web.archive.org/web/20210102094009/https://www.scribbr.com/"
            'citing-sources/et-al/>; rel="memento"; datetime="Sat, 02 Jan 2021 '
            '09:40:09 GMT", <https://web.archive.org/web/20210102094009/'
            "https://www.scribbr.com/citing-sources/et-al/>; "
            'rel="last memento"; datetime="Sat, 02 Jan 2021 09:40:09 GMT"'
        ),
        "Content-Security-Policy": (
            "default-src 'self' 'unsafe-eval' 'unsafe-inline' "
            "data: blob: archive.org web.archive.org analytics.archive.org "
            "pragma.archivelab.org",
        ),
        "X-Archive-Src": "spn2-20210102092956-wwwb-spn20.us.archive.org-8001.warc.gz",
        "Server-Timing": (
            "captures_list;dur=112.646325, exclusion.robots;dur=0.172010, "
            "exclusion.robots.policy;dur=0.158205, RedisCDXSource;dur=2.205932, "
            "esindex;dur=0.014647, LoadShardBlock;dur=82.205012, "
            "PetaboxLoader3.datanode;dur=70.750239, CDXLines.iter;dur=24.306278, "
            "load_resource;dur=26.520179"
        ),
        "X-App-Server": "wwwb-app200",
        "X-ts": "200",
        "X-location": "All",
        "X-Cache-Key": (
            "httpsweb.archive.org/web/20210102094009/"
            "https://www.scribbr.com/citing-sources/et-al/IN",
        ),
        "X-RL": "0",
        "X-Page-Cache": "MISS",
        "X-Archive-Screenname": "0",
        "Content-Encoding": "gzip",
    }

    save_api.headers = cast(CaseInsensitiveDict[str], headers)

    expected_url2 = (
        "https://web.archive.org/web/20210102094009/"
        "https://www.scribbr.com/citing-sources/et-al/"
    )
    assert save_api.archive_url_parser() == expected_url2

    expected_url_3 = (
        "https://web.archive.org/web/20171128185327/"
        "https://www.scribbr.com/citing-sources/et-al/US"
    )
    h = f"START\nX-Cache-Key: {expected_url_3}\nEND\n"
    save_api.headers = h  # type: ignore[assignment]

    expected_url4 = (
        "https://web.archive.org/web/20171128185327/"
        "https://www.scribbr.com/citing-sources/et-al/"
    )
    assert save_api.archive_url_parser() == expected_url4

    h = "TEST TEST TEST AND NO MATCH - TEST FOR RESPONSE URL MATCHING"
    save_api.headers = h  # type: ignore[assignment]
    save_api.response_url = (
        "https://web.archive.org/web/20171128185327/"
        "https://www.scribbr.com/citing-sources/et-al"
    )
    expected_url5 = (
        "https://web.archive.org/web/20171128185327/"
        "https://www.scribbr.com/citing-sources/et-al"
    )
    assert save_api.archive_url_parser() == expected_url5


def test_archive_url() -> None:
    """
    Checks the value of the archive_url attribute when the save method was
    not explicitly invoked by the end-user but was invoked implicitly by
    the archive_url property.
    """
    url = "https://example.com"
    user_agent = (
        "Mozilla/5.0 (MacBook Air; M1 Mac OS X 11_4) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/14.1.1 Safari/604.1"
    )
    save_api = WaybackMachineSaveAPI(url, user_agent)
    save_api.saved_archive = (
        "https://web.archive.org/web/20220124063056/https://example.com/"
    )
    assert save_api.archive_url == save_api.saved_archive
9  tests/test_utils.py  Normal file
@@ -0,0 +1,9 @@
from waybackpy import __version__
from waybackpy.utils import DEFAULT_USER_AGENT


def test_default_user_agent() -> None:
    assert (
        DEFAULT_USER_AGENT
        == f"waybackpy {__version__} - https://github.com/akamhy/waybackpy"
    )
38  tests/test_wrapper.py  Normal file
@@ -0,0 +1,38 @@
from waybackpy.wrapper import Url


def test_oldest() -> None:
    url = "https://bing.com"
    oldest_archive = (
        "https://web.archive.org/web/20030726111100/http://www.bing.com:80/"
    )
    wayback = Url(url).oldest()
    assert wayback.archive_url == oldest_archive
    assert str(wayback) == oldest_archive
    assert len(wayback) > 365 * 15  # days in a year times years


def test_newest() -> None:
    url = "https://www.youtube.com/"
    wayback = Url(url).newest()
    assert "youtube" in str(wayback.archive_url)
    assert "archived_snapshots" in str(wayback.json)


def test_near() -> None:
    url = "https://www.google.com"
    wayback = Url(url).near(year=2010, month=10, day=10, hour=10, minute=10)
    assert "20101010" in str(wayback.archive_url)


def test_total_archives() -> None:
    wayback = Url("https://akamhy.github.io")
    assert wayback.total_archives() > 10

    wayback = Url("https://gaha.ef4i3n.m5iai3kifp6ied.cima/gahh2718gs/ahkst63t7gad8")
    assert wayback.total_archives() == 0


def test_known_urls() -> None:
    wayback = Url("akamhy.github.io")
    assert len(list(wayback.known_urls())) > 40
waybackpy/__init__.py
@@ -1,30 +1,35 @@
# -*- coding: utf-8 -*-
"""Module initializer and provider of static information."""

# ┏┓┏┓┏┓━━━━━━━━━━┏━━┓━━━━━━━━━━┏┓━━┏━━━┓━━━━━
# ┃┃┃┃┃┃━━━━━━━━━━┃┏┓┃━━━━━━━━━━┃┃━━┃┏━┓┃━━━━━
# ┃┃┃┃┃┃┏━━┓━┏┓━┏┓┃┗┛┗┓┏━━┓━┏━━┓┃┃┏┓┃┗━┛┃┏┓━┏┓
# ┃┗┛┗┛┃┗━┓┃━┃┃━┃┃┃┏━┓┃┗━┓┃━┃┏━┛┃┗┛┛┃┏━━┛┃┃━┃┃
# ┗┓┏┓┏┛┃┗┛┗┓┃┗━┛┃┃┗━┛┃┃┗┛┗┓┃┗━┓┃┏┓┓┃┃━━━┃┗━┛┃
# ━┗┛┗┛━┗━━━┛┗━┓┏┛┗━━━┛┗━━━┛┗━━┛┗┛┗┛┗┛━━━┗━┓┏┛
# ━━━━━━━━━━━┏━┛┃━━━━━━━━━━━━━━━━━━━━━━━━┏━┛┃━
# ━━━━━━━━━━━┗━━┛━━━━━━━━━━━━━━━━━━━━━━━━┗━━┛━
__title__ = "waybackpy"
__description__ = (
    "Python package that interfaces with the Internet Archive's Wayback Machine APIs. "
    "Archive pages and retrieve archived pages easily."
)
__url__ = "https://akamhy.github.io/waybackpy/"
__version__ = "3.0.3"
__download_url__ = f"https://github.com/akamhy/waybackpy/archive/{__version__}.tar.gz"
__author__ = "Akash Mahanty"
__author_email__ = "akamhy@yahoo.com"
__license__ = "MIT"
__copyright__ = "Copyright 2020-2022 Akash Mahanty et al."

"""
A python wrapper for Internet Archive's Wayback Machine API.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from .availability_api import WaybackMachineAvailabilityAPI
from .cdx_api import WaybackMachineCDXServerAPI
from .save_api import WaybackMachineSaveAPI
from .wrapper import Url

Archive pages and retrieve archived pages easily.
Usage:
>>> import waybackpy
>>> new_archive = waybackpy.save('https://www.python.org')
>>> print(new_archive)
https://web.archive.org/web/20200502170312/https://www.python.org/

Full documentation @ <https://akamhy.github.io/waybackpy/>.
:copyright: (c) 2020 by akamhy.
:license: MIT
"""

from .wrapper import save, near, oldest, newest, get, clean_url, url_check, total_archives
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __author__, __author_email__, __license__, __copyright__
__all__ = [
    "__author__",
    "__author_email__",
    "__copyright__",
    "__description__",
    "__license__",
    "__title__",
    "__url__",
    "__download_url__",
    "__version__",
    "WaybackMachineAvailabilityAPI",
    "WaybackMachineCDXServerAPI",
    "WaybackMachineSaveAPI",
    "Url",
]
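
This diff replaces the old functional interface with class-based APIs. A
minimal sketch of the new-style usage, mirroring tests/test_save_api.py
above (the user agent string is a placeholder):

.. code:: python

    from waybackpy import WaybackMachineSaveAPI

    save_api = WaybackMachineSaveAPI(
        "https://www.python.org", user_agent="my-user-agent/1.0"
    )
    save_api.save()  # archive the page via the Save API
    print(save_api.archive_url)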
waybackpy/__version__.py
@@ -1,8 +0,0 @@
__title__ = "waybackpy"
__description__ = "A python wrapper for Internet Archive's Wayback Machine API. Archive pages and retrieve archived pages easily."
__url__ = "https://akamhy.github.io/waybackpy/"
__version__ = "v1.5"
__author__ = "akamhy"
__author_email__ = "akash3pro@gmail.com"
__license__ = "MIT"
__copyright__ = "Copyright 2020 akamhy"
261  waybackpy/availability_api.py  Normal file
@@ -0,0 +1,261 @@
"""
This module interfaces the Wayback Machine's availability API.

The interface is useful for looking up archives and finding archives
that are close to a specific date and time.

It has a class WaybackMachineAvailabilityAPI, and the class has
methods like:

near() for retrieving archives close to a specific date and time.

oldest() for retrieving the first archive URL of the webpage.

newest() for retrieving the latest archive of the webpage.

The Wayback Machine Availability API response must be valid JSON; if it
is not, then an exception, InvalidJSONInAvailabilityAPIResponse, is raised.

If the Availability API returns valid JSON but the archive URL cannot be
found in it, then ArchiveNotInAvailabilityAPIResponse is raised.
"""

import json
import time
from datetime import datetime
from typing import Any, Dict, Optional

import requests
from requests.models import Response

from .exceptions import (
    ArchiveNotInAvailabilityAPIResponse,
    InvalidJSONInAvailabilityAPIResponse,
)
from .utils import DEFAULT_USER_AGENT

ResponseJSON = Dict[str, Any]


class WaybackMachineAvailabilityAPI:
    """
    Class that interfaces the Wayback Machine's availability API.
    """

    def __init__(
        self, url: str, user_agent: str = DEFAULT_USER_AGENT, max_tries: int = 3
    ) -> None:
        self.url = str(url).strip().replace(" ", "%20")
        self.user_agent = user_agent
        self.headers: Dict[str, str] = {"User-Agent": self.user_agent}
        self.payload: Dict[str, str] = {"url": self.url}
        self.endpoint: str = "https://archive.org/wayback/available"
        self.max_tries: int = max_tries
        self.tries: int = 0
        self.last_api_call_unix_time: int = int(time.time())
        self.api_call_time_gap: int = 5
        self.json: Optional[ResponseJSON] = None
        self.response: Optional[Response] = None

    @staticmethod
    def unix_timestamp_to_wayback_timestamp(unix_timestamp: int) -> str:
        """
        Converts Unix time to a Wayback Machine timestamp; the Wayback
        Machine timestamp format is yyyyMMddhhmmss.
        """
        return datetime.utcfromtimestamp(int(unix_timestamp)).strftime("%Y%m%d%H%M%S")

    def __repr__(self) -> str:
        """
        Same as the string representation; just returns the archive URL as a string.
        """
        return str(self)

    def __str__(self) -> str:
        """
        String representation of the class. If at least one API
        call was successfully made then returns the archive URL
        as a string. Else returns "" (empty string literal).
        """
        # __str__ can not return anything other than a string object,
        # so if a string repr is asked for even before making an API request,
        # just return "".
        if not self.json:
            return ""

        return self.archive_url

    def setup_json(self) -> Optional[ResponseJSON]:
        """
        Makes the API call to the availability API, sets the JSON response
        on the json attribute of the instance and also returns that
        attribute.

        time_diff and sleep_time make sure that you are not making too many
        requests in a short interval of time; making too many requests is bad
        as the Wayback Machine may reject them above a certain threshold.

        The end-user can change the api_call_time_gap attribute of the instance
        to increase or decrease the default time gap between two successive API
        calls, but it is not recommended to decrease it.
        """
        time_diff = int(time.time()) - self.last_api_call_unix_time
        sleep_time = self.api_call_time_gap - time_diff

        if sleep_time > 0:
            time.sleep(sleep_time)

        self.response = requests.get(
            self.endpoint, params=self.payload, headers=self.headers
        )
        self.last_api_call_unix_time = int(time.time())
        self.tries += 1
        try:
            self.json = None if self.response is None else self.response.json()
        except json.decoder.JSONDecodeError as json_decode_error:
            raise InvalidJSONInAvailabilityAPIResponse(
                f"Response data:\n{self.response.text}"
            ) from json_decode_error

        return self.json

    def timestamp(self) -> datetime:
        """
        Converts the timestamp from the JSON response to a datetime object.
        If the json attribute of the instance is None, it implies that either
        the last API call failed or one was never made.

        If there is no JSON, or there is JSON but no timestamp in it, then
        this returns the maximum possible value for a datetime object.

        If you get a URL as a response from the availability API, it is
        guaranteed that you can get the datetime object from the timestamp.
        """
        if self.json is None or "archived_snapshots" not in self.json:
            return datetime.max

        if (
            self.json is not None
            and "archived_snapshots" in self.json
            and self.json["archived_snapshots"] is not None
            and "closest" in self.json["archived_snapshots"]
            and self.json["archived_snapshots"]["closest"] is not None
            and "timestamp" in self.json["archived_snapshots"]["closest"]
        ):
            return datetime.strptime(
                self.json["archived_snapshots"]["closest"]["timestamp"], "%Y%m%d%H%M%S"
            )

        raise ValueError("Timestamp not found in the Availability API's JSON response.")

    @property
    def archive_url(self) -> str:
        """
        Reads the JSON response data and returns
        the archive URL if found; if not found, raises
        ArchiveNotInAvailabilityAPIResponse.
        """
        archive_url = ""
        data = self.json

        # If the user didn't invoke oldest, newest or near but tries to access
        # the archive_url attribute, then assume that they are fine with any
        # archive and invoke the oldest method.
        if not data:
            self.oldest()

        # If data still has no archived snapshots, then there are probably
        # no archives for the requested URL yet, so retry.
        if not data or not data["archived_snapshots"]:
            while (self.tries < self.max_tries) and (
                not data or not data["archived_snapshots"]
            ):
                self.setup_json()  # it makes a new API call
                data = self.json  # setup_json() updates the value of the json attribute

            # If max_tries is exhausted, then give up and
            # raise ArchiveNotInAvailabilityAPIResponse.

            if not data or not data["archived_snapshots"]:
                raise ArchiveNotInAvailabilityAPIResponse(
                    "Archive not found in the availability "
                    "API response, the URL you requested may not have any archives "
                    "yet. You may retry after some time or archive the webpage now.\n"
                    "Response data:\n"
                    ""
                    if self.response is None
                    else self.response.text
                )
        else:
            archive_url = data["archived_snapshots"]["closest"]["url"]
            archive_url = archive_url.replace(
                "http://web.archive.org/web/", "https://web.archive.org/web/", 1
            )
        return archive_url

    @staticmethod
    def wayback_timestamp(**kwargs: int) -> str:
        """
        Prepends a zero before the year, month, day, hour and minute so that
        they conform to the YYYYMMDDhhmmss Wayback Machine timestamp format.
        """
        return "".join(
            str(kwargs[key]).zfill(2)
            for key in ["year", "month", "day", "hour", "minute"]
        )

    def oldest(self) -> "WaybackMachineAvailabilityAPI":
        """
        Passes the date 1994-01-01 to near(), which should return the oldest
        archive because the Wayback Machine was started in May 1996 and it is
        assumed that there would be no archive older than January 1, 1994.
        """
        return self.near(year=1994, month=1, day=1)

    def newest(self) -> "WaybackMachineAvailabilityAPI":
        """
        Passes the current UNIX time to near() for retrieving the newest
        archive from the availability API.

        Remember UNIX time is UTC, and the Wayback Machine is also UTC based.
        """
        return self.near(unix_timestamp=int(time.time()))

    def near(
        self,
        year: Optional[int] = None,
        month: Optional[int] = None,
        day: Optional[int] = None,
        hour: Optional[int] = None,
        minute: Optional[int] = None,
        unix_timestamp: Optional[int] = None,
    ) -> "WaybackMachineAvailabilityAPI":
        """
        The most important method of this class; oldest() and newest() depend
        on it.

        It generates the timestamp based on the input, by calling either the
        unix_timestamp_to_wayback_timestamp or the wayback_timestamp method
        with the appropriate arguments for their respective parameters.

        Adds the timestamp to the payload dictionary.

        And finally invokes the setup_json method to make the API call, then
        returns the instance.
        """
        if unix_timestamp:
            timestamp = self.unix_timestamp_to_wayback_timestamp(unix_timestamp)
        else:
            now = datetime.utcnow().timetuple()
            timestamp = self.wayback_timestamp(
                year=now.tm_year if year is None else year,
                month=now.tm_mon if month is None else month,
                day=now.tm_mday if day is None else day,
                hour=now.tm_hour if hour is None else hour,
                minute=now.tm_min if minute is None else minute,
            )

        self.payload["timestamp"] = timestamp
        self.setup_json()
        return self
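
A minimal usage sketch for the class above, mirroring
tests/test_availability_api.py (the user agent string is a placeholder):

.. code:: python

    from waybackpy.availability_api import WaybackMachineAvailabilityAPI

    api = WaybackMachineAvailabilityAPI("https://example.com/", "my-user-agent/1.0")
    print(api.oldest().archive_url)                  # first known archive
    print(api.near(year=2010, month=1).archive_url)  # archive closest to Jan 2010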
261
waybackpy/cdx_api.py
Normal file
261
waybackpy/cdx_api.py
Normal file
@ -0,0 +1,261 @@
|
||||
"""
|
||||
This module interfaces the Wayback Machine's CDX server API.
|
||||
|
||||
The module has WaybackMachineCDXServerAPI which should be used by the users of
|
||||
this module to consume the CDX server API.
|
||||
|
||||
WaybackMachineCDXServerAPI has a snapshot method that yields the snapshots, and
|
||||
the snapshots are yielded as instances of the CDXSnapshot class.
|
||||
"""
|
||||
|
||||
|
||||
from typing import Dict, Generator, List, Optional, cast
|
||||
|
||||
from .cdx_snapshot import CDXSnapshot
|
||||
from .cdx_utils import (
|
||||
check_collapses,
|
||||
check_filters,
|
||||
check_match_type,
|
||||
full_url,
|
||||
get_response,
|
||||
get_total_pages,
|
||||
)
|
||||
from .exceptions import WaybackError
|
||||
from .utils import DEFAULT_USER_AGENT
|
||||
|
||||
|
||||
class WaybackMachineCDXServerAPI:
|
||||
"""
|
||||
Class that interfaces the CDX server API of the Wayback Machine.
|
||||
|
||||
snapshot() returns a generator that can be iterated upon by the end-user,
|
||||
the generator returns the snapshots/entries as instance of CDXSnapshot to
|
||||
make the usage easy, just use '.' to get any attribute as the attributes are
|
||||
accessible via a dot ".".
|
||||
"""
|
||||
|
||||
# start_timestamp: from, can not use from as it's a keyword
|
||||
# end_timestamp: to, not using to as can not use from
|
||||
def __init__(
|
||||
self,
|
||||
url: str,
|
||||
user_agent: str = DEFAULT_USER_AGENT,
|
||||
start_timestamp: Optional[str] = None,
|
||||
end_timestamp: Optional[str] = None,
|
||||
filters: Optional[List[str]] = None,
|
||||
match_type: Optional[str] = None,
|
||||
gzip: Optional[str] = None,
|
||||
collapses: Optional[List[str]] = None,
|
||||
limit: Optional[str] = None,
|
||||
max_tries: int = 3,
|
||||
) -> None:
|
||||
self.url = str(url).strip().replace(" ", "%20")
|
||||
self.user_agent = user_agent
|
||||
self.start_timestamp = None if start_timestamp is None else str(start_timestamp)
|
||||
self.end_timestamp = None if end_timestamp is None else str(end_timestamp)
|
||||
self.filters = [] if filters is None else filters
|
||||
check_filters(self.filters)
|
||||
self.match_type = None if match_type is None else str(match_type).strip()
|
||||
check_match_type(self.match_type, self.url)
|
||||
self.gzip = gzip
|
||||
self.collapses = [] if collapses is None else collapses
|
||||
check_collapses(self.collapses)
|
||||
self.limit = 25000 if limit is None else limit
|
||||
self.max_tries = max_tries
|
||||
self.last_api_request_url: Optional[str] = None
|
||||
self.use_page = False
|
||||
self.endpoint = "https://web.archive.org/cdx/search/cdx"
|
||||
|
||||
def cdx_api_manager(
|
||||
self, payload: Dict[str, str], headers: Dict[str, str], use_page: bool = False
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Manages the API calls for the instance, it automatically selects the best
|
||||
parameters by looking as the query of the end-user. For bigger queries
|
||||
automatically use the CDX pagination API and for smaller queries use the
|
||||
normal API.
|
||||
|
||||
CDX Server API is a complex API and to make it easy for the end user to
|
||||
consume it the CDX manager(this method) handles the selection of the
|
||||
API output, whether to use the pagination API or not.
|
||||
|
||||
For doing large/bulk queries, the use of the Pagination API is
|
||||
recommended by the Wayback Machine authors. And it determines if the
|
||||
query would be large or not by using the showNumPages=true parameter,
|
||||
this tells the number of pages of CDX DATA that the pagination API
|
||||
will return.
|
||||
|
||||
If the number of page is less than 2 we use the normal non-pagination
|
||||
API as the pagination API is known to lag and for big queries it should
|
||||
not matter but for queries where the number of pages are less this
|
||||
method chooses accuracy over the pagination API.
|
||||
"""
|
||||
# number of pages that will returned by the pagination API.
|
||||
# get_total_pages adds the showNumPages=true param to pagination API
|
||||
# requests.
|
||||
# This is a special query that will return a single number indicating
|
||||
# the number of pages.
|
||||
total_pages = get_total_pages(self.url, self.user_agent)
|
||||
|
||||
if use_page is True and total_pages >= 2:
|
||||
blank_pages = 0
|
||||
for i in range(total_pages):
|
||||
payload["page"] = str(i)
|
||||
|
||||
url = full_url(self.endpoint, params=payload)
|
||||
res = get_response(url, headers=headers)
|
||||
if isinstance(res, Exception):
|
||||
raise res
|
||||
|
||||
self.last_api_request_url = url
|
||||
text = res.text
|
||||
if len(text) == 0:
|
||||
blank_pages += 1
|
||||
|
||||
if blank_pages >= 2:
|
||||
break
|
||||
|
||||
yield text
|
||||
else:
|
||||
payload["showResumeKey"] = "true"
|
||||
payload["limit"] = str(self.limit)
|
||||
resume_key = None
|
||||
more = True
|
||||
while more:
|
||||
if resume_key:
|
||||
payload["resumeKey"] = resume_key
|
||||
|
||||
url = full_url(self.endpoint, params=payload)
|
||||
res = get_response(url, headers=headers)
|
||||
if isinstance(res, Exception):
|
||||
raise res
|
||||
|
||||
self.last_api_request_url = url
|
||||
|
||||
text = res.text.strip()
|
||||
lines = text.splitlines()
|
||||
|
||||
more = False
|
||||
|
||||
if len(lines) >= 3:
|
||||
|
||||
second_last_line = lines[-2]
|
||||
|
||||
if len(second_last_line) == 0:
|
||||
|
||||
resume_key = lines[-1].strip()
|
||||
text = text.replace(resume_key, "", 1).strip()
|
||||
more = True
|
||||
|
||||
yield text
|
||||
|
||||
def add_payload(self, payload: Dict[str, str]) -> None:
|
||||
"""
|
||||
Adds the payload to the payload dictionary.
|
||||
"""
|
||||
if self.start_timestamp:
|
||||
payload["from"] = self.start_timestamp
|
||||
|
||||
if self.end_timestamp:
|
||||
payload["to"] = self.end_timestamp
|
||||
|
||||
if self.gzip is None:
|
||||
payload["gzip"] = "false"
|
||||
|
||||
if self.match_type:
|
||||
payload["matchType"] = self.match_type
|
||||
|
||||
if self.filters and len(self.filters) > 0:
|
||||
for i, _filter in enumerate(self.filters):
|
||||
payload["filter" + str(i)] = _filter
|
||||
|
||||
if self.collapses and len(self.collapses) > 0:
|
||||
for i, collapse in enumerate(self.collapses):
|
||||
payload["collapse" + str(i)] = collapse
|
||||
|
||||
payload["url"] = self.url
|
||||
|
||||
    def snapshots(self) -> Generator[CDXSnapshot, None, None]:
        """
        This method yields the CDX data lines as snapshots.

        As it is a generator it is exhaustible. The reasons that this is
        a generator and not a list are:

        a) The CDX server API can return millions of entries for a query,
        and a list is not suitable for such cases.

        b) Preventing memory usage issues: as noted above, this method may
        yield millions of records for some queries and your system may not
        have enough memory for such a big list. Also keep this in mind when
        outputting to Jupyter Notebooks.

        The objects yielded by this method are instances of the CDXSnapshot
        class; you can access the fields of an entry as attributes of the
        instance itself.
        """
        payload: Dict[str, str] = {}
        headers = {"User-Agent": self.user_agent}

        self.add_payload(payload)

        # Note: upstream this condition reads
        # `if not self.start_timestamp or self.end_timestamp:`, which would
        # enable pagination whenever an end timestamp is set; the intent
        # appears to be "paginate unless a timestamp range restricts the
        # query", so the negation is applied to both operands here.
        if not self.start_timestamp or not self.end_timestamp:
            self.use_page = True

        if self.collapses != []:
            self.use_page = False

        entries = self.cdx_api_manager(payload, headers, use_page=self.use_page)

        for entry in entries:

            if entry.isspace() or len(entry) <= 1 or not entry:
                continue

            # Each line is a snapshot aka entry of the CDX server API.
            # We are able to split the page by lines because it only
            # splits the lines of a single page and not all the entries
            # at once, thus there should be no issues of too much memory usage.
            snapshot_list = entry.split("\n")

            for snapshot in snapshot_list:

                # 14 + 32 == 46 (timestamp + digest); ignore invalid entries.
                # An entry is invalid if it is shorter than the combined
                # length of a standard wayback_timestamp and a standard digest.
                if len(snapshot) < 46:
                    continue

                properties: Dict[str, Optional[str]] = {
                    "urlkey": None,
                    "timestamp": None,
                    "original": None,
                    "mimetype": None,
                    "statuscode": None,
                    "digest": None,
                    "length": None,
                }

                property_value = snapshot.split(" ")

                total_property_values = len(property_value)
                warranted_total_property_values = len(properties)

                if total_property_values != warranted_total_property_values:
                    raise WaybackError(
                        f"Snapshot returned by CDX API has {total_property_values} prop"
                        f"erties instead of expected {warranted_total_property_values} "
                        f"properties.\nProblematic Snapshot: {snapshot}"
                    )

                (
                    properties["urlkey"],
                    properties["timestamp"],
                    properties["original"],
                    properties["mimetype"],
                    properties["statuscode"],
                    properties["digest"],
                    properties["length"],
                ) = property_value

                yield CDXSnapshot(cast(Dict[str, str], properties))
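A minimal usage sketch of the generator above, assuming waybackpy 3.x is installed and exports WaybackMachineCDXServerAPI from the package root (network access is required):

from waybackpy import WaybackMachineCDXServerAPI

cdx = WaybackMachineCDXServerAPI(
    "https://example.com",
    user_agent="my-user-agent/1.0",  # placeholder user agent
    filters=["statuscode:200"],
)

# snapshots() streams entries page by page, so even very large queries
# stay memory-friendly; the generator is exhaustible.
for snapshot in cdx.snapshots():
    print(snapshot.archive_url)
    break  # stop after the first snapshot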
waybackpy/cdx_snapshot.py (Normal file, 84 lines)
@@ -0,0 +1,84 @@
"""
Module that contains the CDXSnapshot class. CDX records/lines are cast
to CDXSnapshot objects for easier access.

The CDX index format is plain text data. Each line ('record') indicates a
crawled document, and these lines are cast to CDXSnapshot.
"""


from datetime import datetime
from typing import Dict


class CDXSnapshot:
    """
    Class for the CDX snapshot lines ('records') returned by the CDX API.
    Each valid line of the CDX API is cast to a CDXSnapshot object by the
    CDX API interface; just use "." to access any attribute of the CDX
    server API snapshot.

    This provides the end-user the ease of using the data as attributes
    of the CDXSnapshot.

    The string representation of the class is identical to the line returned
    by the CDX server API.

    Besides all the attributes of the CDX server API, this class also
    provides the archive_url attribute, which is the archive URL of the
    snapshot.

    Attributes of this class, what they represent, and what they are
    useful for:

    urlkey: The document captured, expressed as a SURT.
        SURT stands for Sort-friendly URI Reordering Transform, and is a
        transformation applied to URIs which makes their left-to-right
        representation better match the natural hierarchy of domain names.
        A URI <scheme://domain.tld/path?query> has SURT
        form <scheme://(tld,domain,)/path?query>.

    timestamp: The timestamp of the archive; the format is yyyyMMddhhmmss
        and the type is string.

    datetime_timestamp: The timestamp as a datetime object.

    original: The original URL of the archive. If archive_url is
        https://web.archive.org/web/20220113130051/https://google.com then
        the original URL is https://google.com

    mimetype: The document's file type, e.g. text/html.

    statuscode: HTTP response code for the document at the time of its
        crawling.

    digest: Base32-encoded SHA-1 checksum of the document, for
        discriminating it from others.

    length: The document's volume of bytes in the WARC file.

    archive_url: The archive URL of the snapshot; this is not returned by
        the CDX server API but is created by this class on init.
    """

    def __init__(self, properties: Dict[str, str]) -> None:
        self.urlkey: str = properties["urlkey"]
        self.timestamp: str = properties["timestamp"]
        self.datetime_timestamp: datetime = datetime.strptime(
            self.timestamp, "%Y%m%d%H%M%S"
        )
        self.original: str = properties["original"]
        self.mimetype: str = properties["mimetype"]
        self.statuscode: str = properties["statuscode"]
        self.digest: str = properties["digest"]
        self.length: str = properties["length"]
        self.archive_url: str = (
            f"https://web.archive.org/web/{self.timestamp}/{self.original}"
        )

    def __str__(self) -> str:
        """
        The string representation is the same as the line returned by the
        CDX server API for the snapshot.
        """
        return (
            f"{self.urlkey} {self.timestamp} {self.original} "
            f"{self.mimetype} {self.statuscode} {self.digest} {self.length}"
        )
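A hedged example of how a raw CDX line maps onto the class above (the line and digest are made up; CDXSnapshot is assumed importable from waybackpy.cdx_snapshot):

from waybackpy.cdx_snapshot import CDXSnapshot

line = (
    "com,example)/ 20220113130051 https://example.com/ text/html "
    "200 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 1234"
)
fields = ["urlkey", "timestamp", "original", "mimetype",
          "statuscode", "digest", "length"]
snapshot = CDXSnapshot(dict(zip(fields, line.split(" "))))
print(snapshot.archive_url)   # https://web.archive.org/web/20220113130051/https://example.com/
print(str(snapshot) == line)  # True: __str__ round-trips the CDX line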
waybackpy/cdx_utils.py (Normal file, 153 lines)
@@ -0,0 +1,153 @@
"""
Utility functions required for accessing the CDX server API.

These are here in this module so that we don't make any one module too
long.
"""

import re
from typing import Any, Dict, List, Optional, Union
from urllib.parse import quote

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from .exceptions import WaybackError
from .utils import DEFAULT_USER_AGENT


def get_total_pages(url: str, user_agent: str = DEFAULT_USER_AGENT) -> int:
    """
    When using pagination, adding showNumPages=true to the request URL
    makes the CDX server return an integer, which is the number of pages
    of CDX data available for us to query using the pagination API.
    """
    endpoint = "https://web.archive.org/cdx/search/cdx?"
    payload = {"showNumPages": "true", "url": str(url)}
    headers = {"User-Agent": user_agent}
    request_url = full_url(endpoint, params=payload)
    response = get_response(request_url, headers=headers)

    if isinstance(response, requests.Response):
        return int(response.text.strip())
    raise response


def full_url(endpoint: str, params: Dict[str, Any]) -> str:
    """
    As the function's name implies, it returns the full URL. But why do
    we need a function for generating the full URL? Because the CDX
    server can accept multiple arguments for parameters such as filter
    and collapse, and this function adds them without overwriting earlier
    added arguments.
    """
    if not params:
        return endpoint
    _full_url = endpoint if endpoint.endswith("?") else (endpoint + "?")

    for key, val in params.items():
        key = "filter" if key.startswith("filter") else key
        key = "collapse" if key.startswith("collapse") else key
        amp = "" if _full_url.endswith("?") else "&"
        val = quote(str(val), safe="")
        _full_url += f"{amp}{key}={val}"

    return _full_url


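# Example (illustrative, not part of the file above): why the numbered keys
# produced by add_payload work. full_url rewrites filter0, filter1, ... back
# to repeated filter parameters, which is what the CDX server expects.
payload = {
    "url": "example.com",
    "filter0": "statuscode:200",
    "filter1": "!mimetype:image/png",
}
print(full_url("https://web.archive.org/cdx/search/cdx?", params=payload))
# -> https://web.archive.org/cdx/search/cdx?url=example.com
#    &filter=statuscode%3A200&filter=%21mimetype%3Aimage%2Fpng
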
def get_response(
    url: str,
    headers: Optional[Dict[str, str]] = None,
    retries: int = 5,
    backoff_factor: float = 0.5,
) -> Union[requests.Response, Exception]:
    """
    Makes a GET request to the CDX server and returns the response.
    """
    session = requests.Session()

    retries_ = Retry(
        total=retries,
        backoff_factor=backoff_factor,
        status_forcelist=[500, 502, 503, 504],
    )

    session.mount("https://", HTTPAdapter(max_retries=retries_))
    response = session.get(url, headers=headers)
    session.close()
    return response


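# Example (illustrative, not part of the file above): the same session/Retry
# pattern as get_response, shown standalone. The sleep before retry n is
# roughly backoff_factor * (2 ** (n - 1)): 0.5 s, 1 s, 2 s, ...
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
session.mount(
    "https://",
    HTTPAdapter(
        max_retries=Retry(
            total=5, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]
        )
    ),
)
response = session.get("https://web.archive.org/cdx/search/cdx?url=example.com")
print(response.status_code)
session.close()
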
def check_filters(filters: List[str]) -> None:
    """
    Check that the filter arguments passed by the end-user are valid.
    If not valid then raise WaybackError.
    """
    if not isinstance(filters, list):
        raise WaybackError("filters must be a list.")

    # [!]field:regex
    for _filter in filters:
        match = re.search(
            r"(\!?(?:urlkey|timestamp|original|mimetype|statuscode|digest|length)):"
            r"(.*)",
            _filter,
        )

        if match is None or len(match.groups()) != 2:

            exc_message = f"Filter '{_filter}' is not following the cdx filter syntax."
            raise WaybackError(exc_message)


def check_collapses(collapses: List[str]) -> bool:
    """
    Check that the collapse arguments passed by the end-user are valid.
    If not valid then raise WaybackError.
    """
    if not isinstance(collapses, list):
        raise WaybackError("collapses must be a list.")

    if len(collapses) == 0:
        return True

    for collapse in collapses:
        match = re.search(
            r"(urlkey|timestamp|original|mimetype|statuscode|digest|length)"
            r"(:?[0-9]{1,99})?",
            collapse,
        )
        if match is None or len(match.groups()) != 2:
            exc_message = (
                f"collapse argument '{collapse}' "
                "is not following the cdx collapse syntax."
            )
            raise WaybackError(exc_message)

    return True


def check_match_type(match_type: Optional[str], url: str) -> bool:
    """
    Check that the match_type argument passed by the end-user is valid.
    If not valid then raise WaybackError.
    """
    legal_match_type = ["exact", "prefix", "host", "domain"]

    if not match_type:
        return True

    if "*" in url:
        raise WaybackError(
            "Can not use wildcard in the URL along with the match_type arguments."
        )

    if match_type not in legal_match_type:
        exc_message = (
            f"{match_type} is not an allowed match type.\n"
            "Use one from 'exact', 'prefix', 'host' or 'domain'"
        )
        raise WaybackError(exc_message)

    return True
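# Example (illustrative, not part of the file above): the validators in
# action; well-formed arguments pass silently, malformed ones raise
# WaybackError (imports assume the module paths used by this package).
from waybackpy.cdx_utils import check_filters, check_match_type
from waybackpy.exceptions import WaybackError

check_filters(["statuscode:200"])           # ok: follows [!]field:regex
check_match_type("domain", "example.com")   # ok: no wildcard in the URL
try:
    check_match_type("domain", "*.example.com")
except WaybackError as error:
    print(error)  # wildcard and match_type can not be combined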
waybackpy/cli.py (Normal file, 443 lines)
@@ -0,0 +1,443 @@
"""
Module responsible for enabling waybackpy to function as a CLI tool.
"""

import os
import random
import re
import string
from json import dumps
from typing import Any, Generator, List, Optional

import click
import requests

from . import __version__
from .availability_api import WaybackMachineAvailabilityAPI
from .cdx_api import WaybackMachineCDXServerAPI
from .exceptions import ArchiveNotInAvailabilityAPIResponse
from .save_api import WaybackMachineSaveAPI
from .utils import DEFAULT_USER_AGENT
from .wrapper import Url


def echo_availability_api(
    availability_api_instance: WaybackMachineAvailabilityAPI, json: bool
) -> None:
    """
    Output for methods that use the availability API.
    near, oldest and newest output via this function.
    """
    try:
        # accessing archive_url raises if no archive is in the response
        archive_url = availability_api_instance.archive_url
    except ArchiveNotInAvailabilityAPIResponse as error:
        message = (
            "NO ARCHIVE FOUND - The requested URL is probably "
            + "not yet archived or, if the URL was recently archived, then it is "
            + "not yet available via the Wayback Machine's availability API "
            + "because of database lag and should be available after some time."
        )

        click.echo(message + "\nJSON response:\n" + str(error), err=True)
        return

    click.echo("Archive URL:")
    click.echo(archive_url)
    if json:
        click.echo("JSON response:")
        click.echo(dumps(availability_api_instance.json))


def handle_cdx(data: List[Any]) -> None:
    """
    Handles the CDX CLI options and output format.
    """
    url = data[0]
    user_agent = data[1]
    start_timestamp = data[2]
    end_timestamp = data[3]
    cdx_filter = data[4]
    collapse = data[5]
    cdx_print = data[6]
    limit = data[7]
    gzip = data[8]
    match_type = data[9]

    filters = list(cdx_filter)
    collapses = list(collapse)
    cdx_print = list(cdx_print)

    cdx_api = WaybackMachineCDXServerAPI(
        url,
        user_agent=user_agent,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        filters=filters,
        match_type=match_type,
        gzip=gzip,
        collapses=collapses,
        limit=limit,
    )

    snapshots = cdx_api.snapshots()

    for snapshot in snapshots:
        if len(cdx_print) == 0:
            click.echo(snapshot)
        else:
            output_string = []
            if any(val in cdx_print for val in ["urlkey", "url-key", "url_key"]):
                output_string.append(snapshot.urlkey)
            if any(
                val in cdx_print for val in ["timestamp", "time-stamp", "time_stamp"]
            ):
                output_string.append(snapshot.timestamp)
            if "original" in cdx_print:
                output_string.append(snapshot.original)
            if any(val in cdx_print for val in ["mimetype", "mime-type", "mime_type"]):
                output_string.append(snapshot.mimetype)
            if any(
                val in cdx_print for val in ["statuscode", "status-code", "status_code"]
            ):
                output_string.append(snapshot.statuscode)
            if "digest" in cdx_print:
                output_string.append(snapshot.digest)
            if "length" in cdx_print:
                output_string.append(snapshot.length)
            if any(
                val in cdx_print for val in ["archiveurl", "archive-url", "archive_url"]
            ):
                output_string.append(snapshot.archive_url)

            click.echo(" ".join(output_string))


def save_urls_on_file(url_gen: Generator[str, None, None]) -> None:
    """
    Save the output of the CDX API to a file.
    Mainly here because of backwards compatibility.
    """
    domain = None
    sys_random = random.SystemRandom()
    uid = "".join(
        sys_random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
    )
    url_count = 0
    file_name = None

    for url in url_gen:
        url_count += 1
        if not domain:
            match = re.search("https?://([A-Za-z_0-9.-]+).*", url)

            domain = "domain-unknown"

            if match:
                domain = match.group(1)

            file_name = f"{domain}-urls-{uid}.txt"
        file_path = os.path.join(os.getcwd(), file_name)
        if not os.path.isfile(file_path):
            open(file_path, "w+", encoding="utf-8").close()

        with open(file_path, "a", encoding="utf-8") as file:
            file.write(f"{url}\n")

        click.echo(url)

    if url_count > 0:
        click.echo(
            f"\n\n{url_count} URLs saved inside '{file_name}' in the current "
            + "working directory."
        )
    else:
        click.echo("No known URLs found. Please try a different input!")


@click.command()
@click.option(
    "-u", "--url", help="URL on which Wayback machine operations are to be performed."
)
@click.option(
    "-ua",
    "--user-agent",
    "--user_agent",
    default=DEFAULT_USER_AGENT,
    help=f"User agent, default value is '{DEFAULT_USER_AGENT}'.",
)
@click.option("-v", "--version", is_flag=True, default=False, help="waybackpy version.")
@click.option(
    "-l",
    "--show-license",
    "--show_license",
    "--license",
    is_flag=True,
    default=False,
    help="Show license of Waybackpy.",
)
@click.option(
    "-n",
    "--newest",
    "-au",
    "--archive_url",
    "--archive-url",
    default=False,
    is_flag=True,
    help="Retrieve the newest archive of URL.",
)
@click.option(
    "-o",
    "--oldest",
    default=False,
    is_flag=True,
    help="Retrieve the oldest archive of URL.",
)
@click.option(
    "-j",
    "--json",
    default=False,
    is_flag=True,
    help="JSON data returned by the availability API.",
)
@click.option(
    "-N",
    "--near",
    default=False,
    is_flag=True,
    help="Archive close to a specified time.",
)
@click.option("-Y", "--year", type=click.IntRange(1994, 9999), help="Year in integer.")
@click.option("-M", "--month", type=click.IntRange(1, 12), help="Month in integer.")
@click.option("-D", "--day", type=click.IntRange(1, 31), help="Day in integer.")
@click.option("-H", "--hour", type=click.IntRange(0, 24), help="Hour in integer.")
@click.option("-MIN", "--minute", type=click.IntRange(0, 60), help="Minute in integer.")
@click.option(
    "-s",
    "--save",
    default=False,
    is_flag=True,
    help="Save the specified URL's webpage and print the archive URL.",
)
@click.option(
    "-h",
    "--headers",
    default=False,
    is_flag=True,
    help="Headers data of the SavePageNow API.",
)
@click.option(
    "-ku",
    "--known-urls",
    "--known_urls",
    default=False,
    is_flag=True,
    help="List known URLs. Uses CDX API.",
)
@click.option(
    "-sub",
    "--subdomain",
    default=False,
    is_flag=True,
    help="Use with '--known_urls' to include known URLs for subdomains.",
)
@click.option(
    "-f",
    "--file",
    default=False,
    is_flag=True,
    help="Use with '--known_urls' to save the URLs in file at current directory.",
)
@click.option(
    "-c",
    "--cdx",
    default=False,
    is_flag=True,
    help="Flag for using CDX API.",
)
@click.option(
    "-st",
    "--start-timestamp",
    "--start_timestamp",
    "--from",
    help="Start timestamp for CDX API in yyyyMMddhhmmss format.",
)
@click.option(
    "-et",
    "--end-timestamp",
    "--end_timestamp",
    "--to",
    help="End timestamp for CDX API in yyyyMMddhhmmss format.",
)
@click.option(
    "-f",
    "--cdx-filter",
    "--cdx_filter",
    "--filter",
    multiple=True,
    help="Filter on a specific field or all the CDX fields.",
)
@click.option(
    "-mt",
    "--match-type",
    "--match_type",
    help="The default behavior is to return matches for an exact URL. "
    + "However, the CDX server can also return results matching a certain prefix, "
    + "a certain host, or all sub-hosts by using the match_type",
)
@click.option(
    "-gz",
    "--gzip",
    help="To disable gzip compression pass false as argument to this parameter. "
    + "The default behavior is gzip compression enabled.",
)
@click.option(
    "-c",
    "--collapse",
    multiple=True,
    help="Filtering or 'collapse' results based on a field, or a substring of a field.",
)
@click.option(
    "-l",
    "--limit",
    help="Number of maximum record that CDX API is asked to return per API call, "
    + "default value is 25000 records.",
)
@click.option(
    "-cp",
    "--cdx-print",
    "--cdx_print",
    multiple=True,
    help="Print only certain fields of the CDX API response, "
    + "if this parameter is not used then the plain text response of the CDX API "
    + "will be printed.",
)
def main(  # pylint: disable=no-value-for-parameter
    user_agent: str,
    version: bool,
    show_license: bool,
    newest: bool,
    oldest: bool,
    json: bool,
    near: bool,
    save: bool,
    headers: bool,
    known_urls: bool,
    subdomain: bool,
    file: bool,
    cdx: bool,
    cdx_filter: List[str],
    collapse: List[str],
    cdx_print: List[str],
    url: Optional[str] = None,
    year: Optional[int] = None,
    month: Optional[int] = None,
    day: Optional[int] = None,
    hour: Optional[int] = None,
    minute: Optional[int] = None,
    start_timestamp: Optional[str] = None,
    end_timestamp: Optional[str] = None,
    match_type: Optional[str] = None,
    gzip: Optional[str] = None,
    limit: Optional[str] = None,
) -> None:
    """\b
    _ _
    | | | |
    __ ____ _ _ _| |__ __ _ ___| | ___ __ _ _
    \\ \\ /\\ / / _` | | | | '_ \\ / _` |/ __| |/ / '_ \\| | | |
    \\ V V / (_| | |_| | |_) | (_| | (__| <| |_) | |_| |
    \\_/\\_/ \\__,_|\\__, |_.__/ \\__,_|\\___|_|\\_\\ .__/ \\__, |
    __/ | | | __/ |
    |___/ |_| |___/

    Python package & CLI tool that interfaces the Wayback Machine APIs

    Repository: https://github.com/akamhy/waybackpy

    Documentation: https://github.com/akamhy/waybackpy/wiki/CLI-docs

    waybackpy CLI usage (demo video): https://asciinema.org/a/464367

    Released under the MIT License. Use the flag --license for license.

    """
    if version:
        click.echo(f"waybackpy version {__version__}")

    elif show_license:
        click.echo(
            requests.get(
                url="https://raw.githubusercontent.com/akamhy/waybackpy/master/LICENSE"
            ).text
        )
    elif url is None:
        click.echo("No URL detected. Please provide a URL.", err=True)

    elif oldest:
        availability_api = WaybackMachineAvailabilityAPI(url, user_agent=user_agent)
        availability_api.oldest()
        echo_availability_api(availability_api, json)

    elif newest:
        availability_api = WaybackMachineAvailabilityAPI(url, user_agent=user_agent)
        availability_api.newest()
        echo_availability_api(availability_api, json)

    elif near:
        availability_api = WaybackMachineAvailabilityAPI(url, user_agent=user_agent)
        near_args = {}
        keys = ["year", "month", "day", "hour", "minute"]
        args_arr = [year, month, day, hour, minute]
        for key, arg in zip(keys, args_arr):
            if arg:
                near_args[key] = arg
        availability_api.near(**near_args)
        echo_availability_api(availability_api, json)

    elif save:
        save_api = WaybackMachineSaveAPI(url, user_agent=user_agent)
        save_api.save()
        click.echo("Archive URL:")
        click.echo(save_api.archive_url)
        click.echo("Cached save:")
        click.echo(save_api.cached_save)
        if headers:
            click.echo("Save API headers:")
            click.echo(save_api.headers)

    elif known_urls:
        wayback = Url(url, user_agent)
        url_gen = wayback.known_urls(subdomain=subdomain)

        if file:
            save_urls_on_file(url_gen)
        else:
            for url_ in url_gen:
                click.echo(url_)

    elif cdx:
        data = [
            url,
            user_agent,
            start_timestamp,
            end_timestamp,
            cdx_filter,
            collapse,
            cdx_print,
            limit,
            gzip,
            match_type,
        ]
        handle_cdx(data)

    else:
        click.echo(
            "Only URL passed, but did not specify what to do with the URL. "
            "Use --help flag for help using waybackpy.",
            err=True,
        )


if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
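A hedged sketch of exercising the CLI above in-process with click's test runner (assumes the module is importable as waybackpy.cli; --version avoids any network access):

from click.testing import CliRunner

from waybackpy.cli import main

runner = CliRunner()
result = runner.invoke(main, ["--version"])
print(result.output)  # e.g. "waybackpy version 3.x.x"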
@@ -1,43 +1,49 @@
 # -*- coding: utf-8 -*-
 """
 waybackpy.exceptions
 ~~~~~~~~~~~~~~~~~~~
 This module contains the set of Waybackpy's exceptions.
 """


-class TooManyArchivingRequests(Exception):
-    """Error when a single url reqeusted for archiving too many times in a short timespam.
-    Wayback machine doesn't supports archivng any url too many times in a short period of time.
-    """
+class WaybackError(Exception):
+    """
+    Raised when Waybackpy can not return what you asked for.
+
+    1) Wayback Machine API Service is unreachable/down.
+    2) You passed illegal arguments.
+
+    All other exceptions are inherited from this main exception.
+    """


-class ArchivingNotAllowed(Exception):
-    """Files like robots.txt are set to deny robot archiving.
-    Wayback machine respects these file, will not archive.
-    """
+class TooManyRequestsError(WaybackError):
+    """
+    Raised when you make more than 15 requests per
+    minute and the Wayback Machine returns 429.
+
+    See https://github.com/akamhy/waybackpy/issues/131
+    """


-class PageNotSaved(Exception):
-    """
-    When unable to save a webpage.
-    """
+class MaximumRetriesExceeded(WaybackError):
+    """
+    MaximumRetriesExceeded
+    """


-class ArchiveNotFound(Exception):
-    """
-    When a page was never archived but client asks for old archive.
-    """
+class MaximumSaveRetriesExceeded(MaximumRetriesExceeded):
+    """
+    MaximumSaveRetriesExceeded
+    """


-class UrlNotFound(Exception):
-    """
-    Raised when 404 UrlNotFound.
-    """
+class ArchiveNotInAvailabilityAPIResponse(WaybackError):
+    """
+    Could not parse the archive in the JSON response of the availability API.
+    """


-class BadGateWay(Exception):
-    """
-    Raised when 502 bad gateway.
-    """
-
-
-class WaybackUnavailable(Exception):
-    """
-    Raised when 503 API Service Temporarily Unavailable.
-    """
-
-
-class InvalidUrl(Exception):
-    """
-    Raised when url doesn't follow the standard url format.
-    """
+class InvalidJSONInAvailabilityAPIResponse(WaybackError):
+    """
+    availability api returned invalid JSON
+    """
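A small sketch of how the new hierarchy above is meant to be used: catching WaybackError also catches all of its subclasses.

from waybackpy.exceptions import TooManyRequestsError, WaybackError

try:
    raise TooManyRequestsError("429 returned by the Wayback Machine")
except WaybackError as error:  # TooManyRequestsError inherits from WaybackError
    print(f"handled: {error}")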
waybackpy/save_api.py (Normal file, 225 lines)
@@ -0,0 +1,225 @@
"""
This module interfaces the Wayback Machine's SavePageNow (SPN) API.

The module has the WaybackMachineSaveAPI class, which users of this
module should use to access the SavePageNow API.
"""

import re
import time
from datetime import datetime
from typing import Dict, Optional

import requests
from requests.adapters import HTTPAdapter
from requests.models import Response
from requests.structures import CaseInsensitiveDict
from urllib3.util.retry import Retry

from .exceptions import MaximumSaveRetriesExceeded, TooManyRequestsError, WaybackError
from .utils import DEFAULT_USER_AGENT


class WaybackMachineSaveAPI:
    """
    WaybackMachineSaveAPI class provides an interface for saving URLs on the
    Wayback Machine.
    """

    def __init__(
        self,
        url: str,
        user_agent: str = DEFAULT_USER_AGENT,
        max_tries: int = 8,
    ) -> None:
        self.url = str(url).strip().replace(" ", "%20")
        self.request_url = "https://web.archive.org/save/" + self.url
        self.user_agent = user_agent
        self.request_headers: Dict[str, str] = {"User-Agent": self.user_agent}
        if max_tries < 1:
            raise ValueError("max_tries should be positive")
        self.max_tries = max_tries
        self.total_save_retries = 5
        self.backoff_factor = 0.5
        self.status_forcelist = [500, 502, 503, 504]
        self._archive_url: Optional[str] = None
        self.instance_birth_time = datetime.utcnow()
        self.response: Optional[Response] = None
        self.headers: Optional[CaseInsensitiveDict[str]] = None
        self.status_code: Optional[int] = None
        self.response_url: Optional[str] = None
        self.cached_save: Optional[bool] = None
        self.saved_archive: Optional[str] = None

    @property
    def archive_url(self) -> str:
        """
        Returns the archive URL if it is already cached in _archive_url,
        else invokes the save method, which saves the archive and returns
        it, and thus we return the save method's return value.
        """
        if self._archive_url:
            return self._archive_url

        return self.save()

    def get_save_request_headers(self) -> None:
        """
        Creates a session and tries 'retries' number of times to
        retrieve the archive.

        If successful in getting the response, sets the headers, status_code
        and response_url attributes.

        The archive is usually in the headers, but it can also be the
        response URL, as the Wayback Machine redirects to the archive after
        a successful capture of the webpage.

        The Wayback Machine's save API is known to be very unreliable, so
        if it fails, first check by opening the response URL yourself in
        the browser.
        """
        session = requests.Session()
        retries = Retry(
            total=self.total_save_retries,
            backoff_factor=self.backoff_factor,
            status_forcelist=self.status_forcelist,
        )
        session.mount("https://", HTTPAdapter(max_retries=retries))
        self.response = session.get(self.request_url, headers=self.request_headers)
        # requests.response.headers is requests.structures.CaseInsensitiveDict
        self.headers = self.response.headers
        self.status_code = self.response.status_code
        self.response_url = self.response.url
        session.close()

        if self.status_code == 429:
            # why wait 5 minutes on 429?
            # see https://github.com/akamhy/waybackpy/issues/97
            raise TooManyRequestsError(
                f"Can not save '{self.url}'. "
                f"Save request refused by the server. "
                f"Save Page Now limits saving to 15 URLs per minute. "
                f"Try waiting for 5 minutes and then try again."
            )

        # why 509?
        # see https://github.com/akamhy/waybackpy/pull/99
        # also https://t.co/xww4YJ0Iwc
        if self.status_code == 509:
            raise WaybackError(
                f"Can not save '{self.url}'. You have probably reached the "
                f"limit of active sessions."
            )

    def archive_url_parser(self) -> Optional[str]:
        """
        Three regexen (like oxen?) are used to search for the archive URL
        in the headers, and finally we look in the response URL for the
        archive URL.
        """
        regex1 = r"Content-Location: (/web/[0-9]{14}/.*)"
        match = re.search(regex1, str(self.headers))
        if match:
            return "https://web.archive.org" + match.group(1)

        regex2 = r"rel=\"memento.*?(web\.archive\.org/web/[0-9]{14}/.*?)>"
        match = re.search(regex2, str(self.headers))
        if match is not None and len(match.groups()) == 1:
            return "https://" + match.group(1)

        regex3 = r"X-Cache-Key:\shttps(.*)[A-Z]{2}"
        match = re.search(regex3, str(self.headers))
        if match is not None and len(match.groups()) == 1:
            return "https" + match.group(1)

        self.response_url = (
            "" if self.response_url is None else self.response_url.strip()
        )
        regex4 = r"web\.archive\.org/web/(?:[0-9]*?)/(?:.*)$"
        match = re.search(regex4, self.response_url)
        if match is not None:
            return "https://" + match.group(0)

        return None

    @staticmethod
    def sleep(tries: int) -> None:
        """
        Ensure that we wait some time before successive retries so that we
        don't waste the retries before the page is even captured by the
        Wayback Machine crawlers; this also ensures that we are not putting
        too much load on the Wayback Machine's save API.

        If tries is a multiple of 3, sleep 10 seconds, else sleep 5 seconds.
        """
        sleep_seconds = 5
        if tries % 3 == 0:
            sleep_seconds = 10
        time.sleep(sleep_seconds)

    def timestamp(self) -> datetime:
        """
        Read the timestamp off the archive URL and convert the Wayback
        Machine timestamp to a datetime object.

        Also compare the time on the archive URL to the instance birth
        time.

        If the time on the archive is older than the instance creation
        time, set cached_save to True, else set it to False. The flag can
        be used to check whether the Wayback Machine served a cached URL.
        It is quite common for the Wayback Machine to serve a cached
        archive if the last archive was captured within the last 45
        minutes.
        """
        regex = r"https?://web\.archive.org/web/([0-9]{14})/http"
        match = re.search(regex, str(self._archive_url))

        if match is None or len(match.groups()) != 1:
            raise ValueError(
                f"Can not parse timestamp from archive URL, '{self._archive_url}'."
            )

        string_timestamp = match.group(1)
        timestamp = datetime.strptime(string_timestamp, "%Y%m%d%H%M%S")
        timestamp_unixtime = time.mktime(timestamp.timetuple())
        instance_birth_time_unixtime = time.mktime(self.instance_birth_time.timetuple())

        if timestamp_unixtime < instance_birth_time_unixtime:
            self.cached_save = True
        else:
            self.cached_save = False

        return timestamp

    def save(self) -> str:
        """
        Calls the SavePageNow API of the Wayback Machine with the required
        parameters and headers to save the URL.

        Raises MaximumSaveRetriesExceeded if the maximum retries are
        exhausted but we were still unable to retrieve the archive from
        the Wayback Machine.
        """
        self.saved_archive = None
        tries = 0

        while True:
            if tries >= 1:
                self.sleep(tries)

            self.get_save_request_headers()
            self.saved_archive = self.archive_url_parser()

            if isinstance(self.saved_archive, str):
                self._archive_url = self.saved_archive
                self.timestamp()
                return self.saved_archive

            tries += 1
            if tries >= self.max_tries:
                raise MaximumSaveRetriesExceeded(
                    f"Tried {tries} times but failed to save "
                    f"and retrieve the archive for {self.url}.\n"
                    f"Response URL:\n{self.response_url}\n"
                    f"Response Header:\n{self.headers}"
                )
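A hedged usage sketch of the class above, assuming waybackpy 3.x is installed and exports WaybackMachineSaveAPI from the package root; SavePageNow needs network access and can be slow or rate limited:

from waybackpy import WaybackMachineSaveAPI

save_api = WaybackMachineSaveAPI("https://example.com", user_agent="my-user-agent/1.0")
archive_url = save_api.save()    # retries internally, may take a while
print(archive_url)
print(save_api.cached_save)      # True if a cached capture was served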
waybackpy/utils.py (Normal file, 9 lines)
@@ -0,0 +1,9 @@
"""
Utility functions and shared variables like DEFAULT_USER_AGENT are here.
"""

from . import __version__

DEFAULT_USER_AGENT: str = (
    f"waybackpy {__version__} - https://github.com/akamhy/waybackpy"
)
@@ -1,143 +1,162 @@
 # -*- coding: utf-8 -*-
-import json
-from datetime import datetime
-from waybackpy.exceptions import TooManyArchivingRequests, ArchivingNotAllowed, PageNotSaved, ArchiveNotFound, UrlNotFound, BadGateWay, InvalidUrl, WaybackUnavailable
-try:
-    from urllib.request import Request, urlopen
-    from urllib.error import HTTPError, URLError
-except ImportError:
-    from urllib2 import Request, urlopen, HTTPError, URLError
+"""
+This module exists because backwards compatibility matters.
+Don't touch this or add any new functionality here and don't use
+the Url class.
+"""
+
+from datetime import datetime, timedelta
+from typing import Generator, Optional
+
+from requests.structures import CaseInsensitiveDict
+
+from .availability_api import ResponseJSON, WaybackMachineAvailabilityAPI
+from .cdx_api import WaybackMachineCDXServerAPI
+from .save_api import WaybackMachineSaveAPI
+from .utils import DEFAULT_USER_AGENT
 
 
-default_UA = "waybackpy python package"
-
-
-def url_check(url):
-    if "." not in url:
-        raise InvalidUrl("'%s' is not a vaild url." % url)
-
-
-def clean_url(url):
-    return str(url).strip().replace(" ","_")
-
-
-def wayback_timestamp(**kwargs):
-    return (
-        str(kwargs["year"])
-        +
-        str(kwargs["month"]).zfill(2)
-        +
-        str(kwargs["day"]).zfill(2)
-        +
-        str(kwargs["hour"]).zfill(2)
-        +
-        str(kwargs["minute"]).zfill(2)
-    )
-
-
-def handle_HTTPError(e):
-    if e.code == 502:
-        raise BadGateWay(e)
-    elif e.code == 503:
-        raise WaybackUnavailable(e)
-    elif e.code == 429:
-        raise TooManyArchivingRequests(e)
-    elif e.code == 404:
-        raise UrlNotFound(e)
-
-
-def save(url, UA=default_UA):
-    url_check(url)
-    request_url = ("https://web.archive.org/save/" + clean_url(url))
-
-    hdr = { 'User-Agent' : '%s' % UA } #nosec
-    req = Request(request_url, headers=hdr) #nosec
-
-    try:
-        response = urlopen(req) #nosec
-    except HTTPError as e:
-        if handle_HTTPError(e) is None:
-            raise PageNotSaved(e)
-    except URLError:
-        try:
-            response = urlopen(req) #nosec
-        except URLError as e:
-            raise UrlNotFound(e)
-
-    header = response.headers
-
-    if "exclusion.robots.policy" in str(header):
-        raise ArchivingNotAllowed("Can not archive %s. Disabled by site owner." % (url))
-
-    return "https://web.archive.org" + header['Content-Location']
-
-
-def get(url, encoding=None, UA=default_UA):
-    url_check(url)
-    hdr = { 'User-Agent' : '%s' % UA }
-    req = Request(clean_url(url), headers=hdr) #nosec
-
-    try:
-        resp=urlopen(req) #nosec
-    except URLError:
-        try:
-            resp=urlopen(req) #nosec
-        except URLError as e:
-            raise UrlNotFound(e)
-
-    if encoding is None:
-        try:
-            encoding= resp.headers['content-type'].split('charset=')[-1]
-        except AttributeError:
-            encoding = "UTF-8"
-
-    return resp.read().decode(encoding.replace("text/html", "UTF-8", 1))
-
-
-def near(url, **kwargs):
-    try:
-        url = kwargs["url"]
-    except KeyError:
-        url = url
-
-    year=kwargs.get("year", datetime.utcnow().strftime('%Y'))
-    month=kwargs.get("month", datetime.utcnow().strftime('%m'))
-    day=kwargs.get("day", datetime.utcnow().strftime('%d'))
-    hour=kwargs.get("hour", datetime.utcnow().strftime('%H'))
-    minute=kwargs.get("minute", datetime.utcnow().strftime('%M'))
-    UA=kwargs.get("UA", default_UA)
-
-    url_check(url)
-    timestamp = wayback_timestamp(year=year,month=month,day=day,hour=hour,minute=minute)
-    request_url = "https://archive.org/wayback/available?url=%s&timestamp=%s" % (clean_url(url), str(timestamp))
-    hdr = { 'User-Agent' : '%s' % UA }
-    req = Request(request_url, headers=hdr) # nosec
-
-    try:
-        response = urlopen(req) #nosec
-    except HTTPError as e:
-        handle_HTTPError(e)
-
-    data = json.loads(response.read().decode("UTF-8"))
-    if not data["archived_snapshots"]:
-        raise ArchiveNotFound("'%s' is not yet archived." % url)
-
-    archive_url = (data["archived_snapshots"]["closest"]["url"])
-    # wayback machine returns http sometimes, idk why? But they support https
-    archive_url = archive_url.replace("http://web.archive.org/web/","https://web.archive.org/web/",1)
-    return archive_url
-
-
-def oldest(url, UA=default_UA, year=1994):
-    return near(url, year=year, UA=UA)
-
-
-def newest(url, UA=default_UA):
-    return near(url, UA=UA)
-
-
-def total_archives(url, UA=default_UA):
-    url_check(url)
-
-    hdr = { 'User-Agent' : '%s' % UA }
-    request_url = "https://web.archive.org/cdx/search/cdx?url=%s&output=json" % clean_url(url)
-    req = Request(request_url, headers=hdr) # nosec
-
-    try:
-        response = urlopen(req) #nosec
-    except HTTPError as e:
-        handle_HTTPError(e)
-
-    return (len(json.loads(response.read())))
+class Url:
+    """
+    The Url class is not recommended to be used anymore, instead use:
+
+    - WaybackMachineSaveAPI
+    - WaybackMachineAvailabilityAPI
+    - WaybackMachineCDXServerAPI
+
+    The reason it is still in the code is backwards compatibility with 2.x.x
+    versions.
+
+    If you were using the Url class before the update to version 3.x.x, your
+    code should still be working fine and there is no hurry to update the
+    interface, but it is recommended that you do not use the Url class for
+    new code, as it will be removed after 2025; the first 3.x.x version was
+    released in January 2022, and three years are more than enough to update
+    the older interface code.
+    """
+
+    def __init__(self, url: str, user_agent: str = DEFAULT_USER_AGENT) -> None:
+        self.url = url
+        self.user_agent = str(user_agent)
+        self.archive_url: Optional[str] = None
+        self.timestamp: Optional[datetime] = None
+        self.wayback_machine_availability_api = WaybackMachineAvailabilityAPI(
+            self.url, user_agent=self.user_agent
+        )
+        self.wayback_machine_save_api: Optional[WaybackMachineSaveAPI] = None
+        self.headers: Optional[CaseInsensitiveDict[str]] = None
+        self.json: Optional[ResponseJSON] = None
+
+    def __str__(self) -> str:
+        if not self.archive_url:
+            self.newest()
+        return str(self.archive_url)
+
+    def __len__(self) -> int:
+        td_max = timedelta(
+            days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999
+        )
+
+        if not isinstance(self.timestamp, datetime):
+            self.oldest()
+
+        if not isinstance(self.timestamp, datetime):
+            raise TypeError("timestamp must be a datetime")
+
+        if self.timestamp == datetime.max:
+            return td_max.days
+
+        return (datetime.utcnow() - self.timestamp).days
+
+    def save(self) -> "Url":
+        """Save the URL on wayback machine."""
+        self.wayback_machine_save_api = WaybackMachineSaveAPI(
+            self.url, user_agent=self.user_agent
+        )
+        self.archive_url = self.wayback_machine_save_api.archive_url
+        self.timestamp = self.wayback_machine_save_api.timestamp()
+        self.headers = self.wayback_machine_save_api.headers
+        return self
+
+    def near(
+        self,
+        year: Optional[int] = None,
+        month: Optional[int] = None,
+        day: Optional[int] = None,
+        hour: Optional[int] = None,
+        minute: Optional[int] = None,
+        unix_timestamp: Optional[int] = None,
+    ) -> "Url":
+        """Returns the archive of the URL close to a date and time."""
+        self.wayback_machine_availability_api.near(
+            year=year,
+            month=month,
+            day=day,
+            hour=hour,
+            minute=minute,
+            unix_timestamp=unix_timestamp,
+        )
+        self.set_availability_api_attrs()
+        return self
+
+    def oldest(self) -> "Url":
+        """Returns the oldest archive of the URL."""
+        self.wayback_machine_availability_api.oldest()
+        self.set_availability_api_attrs()
+        return self
+
+    def newest(self) -> "Url":
+        """Returns the newest archive of the URL."""
+        self.wayback_machine_availability_api.newest()
+        self.set_availability_api_attrs()
+        return self
+
+    def set_availability_api_attrs(self) -> None:
+        """Set the attributes for total backwards compatibility."""
+        self.archive_url = self.wayback_machine_availability_api.archive_url
+        self.json = self.wayback_machine_availability_api.json
+        self.JSON = self.json  # for backwards compatibility, do not remove it.
+        self.timestamp = self.wayback_machine_availability_api.timestamp()
+
+    def total_archives(
+        self, start_timestamp: Optional[str] = None, end_timestamp: Optional[str] = None
+    ) -> int:
+        """
+        Returns an integer which indicates the total number of archives for
+        a URL. Useless in my opinion, only here because of backwards
+        compatibility.
+        """
+        cdx = WaybackMachineCDXServerAPI(
+            self.url,
+            user_agent=self.user_agent,
+            start_timestamp=start_timestamp,
+            end_timestamp=end_timestamp,
+        )
+
+        count = 0
+        for _ in cdx.snapshots():
+            count = count + 1
+        return count
+
+    def known_urls(
+        self,
+        subdomain: bool = False,
+        host: bool = False,
+        start_timestamp: Optional[str] = None,
+        end_timestamp: Optional[str] = None,
+        match_type: str = "prefix",
+    ) -> Generator[str, None, None]:
+        """Yields known URLs for any URL."""
+        if subdomain:
+            match_type = "domain"
+        if host:
+            match_type = "host"
+
+        cdx = WaybackMachineCDXServerAPI(
+            self.url,
+            user_agent=self.user_agent,
+            start_timestamp=start_timestamp,
+            end_timestamp=end_timestamp,
+            match_type=match_type,
+            collapses=["urlkey"],
+        )
+
+        for snapshot in cdx.snapshots():
+            yield snapshot.original
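A hedged sketch of the legacy 2.x-style interface kept above for backwards compatibility; new code should prefer the three API classes directly (Url is assumed to be exported from the package root):

from waybackpy import Url

wayback = Url("https://example.com", "my-user-agent/1.0")
print(wayback.oldest().archive_url)      # method chaining works, as above
for known_url in wayback.known_urls():   # streams originals via the CDX API
    print(known_url)
    break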