Table of Contents

Crack an MD5 hash using a password list

import hashlib

# Target MD5 digest (lowercase hex) we want to recover the plaintext for.
hash1 = "d3d0045e38575a34a6cd72c22e2eb7f2"


def crack_md5(target_hash, wordlist_path="password-list.txt"):
    """Return the first word in *wordlist_path* whose MD5 digest equals
    *target_hash* (lowercase hex string), or None when nothing matches.

    One candidate per line; trailing whitespace/newlines are stripped.
    """
    with open(wordlist_path, "r") as wordlist:
        for line in wordlist:
            candidate = line.strip()
            if hashlib.md5(candidate.encode()).hexdigest() == target_hash:
                return candidate
    return None


if __name__ == "__main__":
    password = crack_md5(hash1)
    # Bug fix: the original printed the *last* word of the list as "found"
    # even when no candidate matched; only report an actual match.
    if password is not None:
        print("Password found: %s" % (password))
    else:
        print("Password not found in list")

Crack SHA-512 crypt hashes from /etc/shadow

#!/usr/bin/python3

import hashlib
import crypt  # NOTE(review): deprecated since 3.11, removed in Python 3.13 — confirm target version.

# Salt (including the "$6$" SHA-512 marker) and digest copied from /etc/shadow.
salt1 = "$6$60F6HXE1fqkRWYxR$"
hash1 = "Bh3oM.HvbkJmuFGnHifRWoDv7B4CrMFVBN.AJTM4ZYflXhuEdXYWfuf0h3P5Vu2M6uKIasmsOoI9qLXQwp6vA/"

# Full shadow-style hash string: "$6$<salt>$<digest>".
password1 = salt1 + hash1


def crack_crypt(target, salt, wordlist_path="password-list.txt"):
    """Return the first word in *wordlist_path* whose crypt(3) hash computed
    with *salt* equals *target* (the full "$6$salt$digest" string), or None
    when nothing matches.
    """
    with open(wordlist_path, "r") as wordlist:
        for line in wordlist:
            candidate = line.strip()
            # Fix: reuse the salt parameter instead of the hard-coded salt
            # literal the original duplicated inside the loop.
            if crypt.crypt(candidate, salt) == target:
                return candidate
    return None


if __name__ == "__main__":
    password = crack_crypt(password1, salt1)
    if password is not None:
        print("I found the following password: %s" % (password))
    else:
        print("Password not found in list")

why use python

Pandas - a data analysis library

Scapy - sniff and dissect and forge network packets

Nmap - helps automate scanning results and reports

BeautifulSoup - a library that makes it easy to scrape information from web pages

Requests - The requests module allows you to send HTTP requests using Python

Forensic analysis tools

apt-get install python3-exif python3-requests python3-bs4 python3-pip

find the images

import requests
from bs4 import BeautifulSoup


# Directory-listing page whose <img> tags we want to enumerate.
url = "http://python.vic-tim.de/images/"

response = requests.get(url)
if response.status_code == 200:
    # Parse the returned HTML and pull out every <img> tag.
    doc = BeautifulSoup(response.text, "html.parser")

    print(doc.title)
    for img in doc.find_all("img"):
        # Bug fix: attrs["src"] raises KeyError for an <img> without a src
        # attribute; .get() skips such tags instead of crashing.
        src = img.get("src")
        if src is not None:
            print(src)
else:
    # The original exited silently on any non-200 response.
    print("Request failed with status code %s" % response.status_code)

find and save the images

 import requests
 import urllib
 import os 
 from bs4 import BeautifulSoup
 
 #create the folder for saving the images
 if not os.path.exists("./images"): 
   os.mkdir("./images")
 
 url = "http://python.vic-tim.de/images/"
 response = requests.get(url)
 
 found_images = []
 if response.status_code == 200:
     doc = BeautifulSoup(response.text, "html.parser") 
 
     images = doc.find_all("img")
     
     for img in images:
         path = urllib.parse.urljoin(url, img.attrs["src"])
         found_images.append(path)
 
 for found_image in found_images:
     filename = found_image.split("/")[-1]
     response = requests.get(found_image)
     with open("./images/" + filename, "wb") as file:
         file.write(response.content)