Python 3 Script to Download Multiple Files and Images from a Website Full Project For Beginners


app.py


import urllib.request

with open('file.txt') as f:
    for line in f:
        url = line.strip()
        # use the text after the last '/' as the local file name
        path = 'your path' + url.split('/')[-1]
        urllib.request.urlretrieve(url, path)


Store all of the image and file URLs inside a file called file.txt, then run this script with the command below:


python app.py
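
For example, file.txt might look like this (hypothetical URLs, one per line):

https://example.com/images/photo1.jpg
https://example.com/images/photo2.png
https://example.com/docs/report.pdf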


First, you have to read your .txt file as something you can iterate over. Then you can use a for loop to go one by one over the URL links:

import os
import urllib.request

with open('pages.txt', 'r') as urls:
    for url in urls:
        url = url.strip()
        # name the local file after the last path segment of the URL
        path = '/users/user/Downloads/{}'.format(os.path.basename(url))
        urllib.request.urlretrieve(url, path)
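
Note that both loops above stop at the first URL that fails to download. Here is a minimal sketch of the same loop with basic error handling, assuming the same pages.txt layout (one URL per line):

import os
import urllib.request

with open('pages.txt', 'r') as urls:
    for url in urls:
        url = url.strip()
        if not url:
            continue  # skip blank lines
        path = '/users/user/Downloads/{}'.format(os.path.basename(url))
        try:
            urllib.request.urlretrieve(url, path)
        except OSError as e:
            # report the failure and keep going with the remaining URLs
            print('failed to download {}: {}'.format(url, e))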


You can use a thread pool or a process pool for concurrency:

import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading:", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name = url[url.rfind("/") + 1:]

    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(file_name, 'wb') as f:
            # stream the response body to disk in chunks
            for data in r.iter_content(chunk_size=8192):
                f.write(data)
    return url


urls = ["https://jsonplaceholder.typicode.com/posts",
        "https://jsonplaceholder.typicode.com/comments",
        "https://jsonplaceholder.typicode.com/photos",
        "https://jsonplaceholder.typicode.com/todos",
        "https://jsonplaceholder.typicode.com/albums"]

# run 5 worker threads; each call takes the next element in the urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
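
If you prefer the standard library over multiprocessing.pool, a roughly equivalent sketch uses concurrent.futures, reusing the download_url function and urls list defined above:

from concurrent.futures import ThreadPoolExecutor

# run the downloads on 5 worker threads
with ThreadPoolExecutor(max_workers=5) as executor:
    # unlike imap_unordered, executor.map yields results in input order
    for result in executor.map(download_url, urls):
        print(result)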
