Writing text to CSV after web scraping with Python

I am extracting real estate data by web scraping in Python, and I want this data in a CSV file.
When I write the data to CSV, if the first scraped item doesn't have a value I need (but other items do have it), that field is simply skipped: no column is created for it at all, not even one filled with null values.

My code block for web scraping:

from selenium import webdriver
from bs4 import BeautifulSoup
import re
import csv
import time


PATH = r'C:\Program Files (x86)\chromedriver.exe'  # raw string so the backslashes are kept literally
driver = webdriver.Chrome(PATH)
data = []


def get_dl(soup):
    d_list = {}

    for dl in soup.find_all("dl", {"class": "obj-details"}):
        for el in dl.find_all(["dt", "dd"]):
            if el.name == 'dt':
                key = el.get_text(strip=True)
            elif key in ['Plotas:', 'Buto numeris:', 'Metai:', 'Namo numeris:', 'Kambarių sk.:', 'Aukštas:', 'Aukštų sk.:', 'Pastato tipas:', 'Šildymas:', 'Įrengimas:', 'Pastato energijos suvartojimo klasė:', 'Ypatybės:', 'Papildomos patalpos:', 'Papildoma įranga:', 'Apsauga:']:
                d_list[key] = ' '.join(el.text.strip().replace("\n", ", ").split('NAUDINGA')[0].split('m²')[0].split())
    return d_list

for puslapis in range(1, 2):
    driver.get(f'https://www.aruodas.lt/butai/kaune/puslapis/{puslapis}')
    response = driver.page_source
    soup = BeautifulSoup(response, 'html.parser')
    blocks = soup.find_all('tr', class_='list-row')
    stored_urls = []

    for url in blocks:
        try:
            stored_urls.append(url.a['href'])
        except:
            pass

    for link in stored_urls:
        driver.get(link)
        response = driver.page_source
        soup = BeautifulSoup(response, 'html.parser')
        h1 = soup.find('h1', 'obj-header-text')
        price = soup.find('div', class_ = 'price-left')

        try:
            address1 = h1.get_text(strip=True)
            address2 = re.findall(r'(.*),[^,]*$', address1)
            address = ''.join(address2)
            city, district, street = address.split(',')
        except:
            city = district = street = 'NaN'  # unpacking 'NaN' into three names would give 'N', 'a', 'N'

        try:
            full_price = price.find('span', class_ = 'price-eur').text.strip()
            full_price1 = full_price.replace('€', '').replace(' ','').strip()
        except:
            full_price1 = 'NaN'

        try:
            price_sq_m = price.find('span', class_ = 'price-per').text.strip()
            price_sq_m1 = price_sq_m.replace('€/m²)', '').replace('(domina keitimas)', '').replace('(', '').replace(' ','').strip()
        except:
            price_sq_m1 = 'NaN'

        try:
            price_change = price.find('div', class_ = 'price-change').text.strip()
            price_change1 = price_change.replace('%', '').strip()
        except:
            price_change1 = 'NaN'

        data.append({'city': city, 'district': district, 'street': street, 'full_price': full_price1, 'price_sq_m': price_sq_m1, 'price_change': price_change1, **get_dl(soup)})

For example, the key list contains this value:

['Ypatybės:']

But on the page I am scraping, the first flat doesn't have that value, so the column is never created at all, which is not what I need.

My code block for writing to CSV:

        with open('output_kaunas.csv', 'w', encoding='utf-8', newline='') as f_output:
            csv_output = csv.DictWriter(f_output, fieldnames=data[0].keys(), extrasaction='ignore')
            csv_output.writeheader()
            csv_output.writerows(data)

So, my question is: how do I create the column for a feature I need, even when that feature doesn't exist in the first scraped item?
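
Here is a minimal example with made-up data (the values are hypothetical, not from the site) that reproduces the behaviour:

import csv

# Made-up sample rows: the first dict is missing the 'Ypatybės:' key
data = [
    {'city': 'Kaunas', 'full_price': '95000'},
    {'city': 'Kaunas', 'full_price': '120000', 'Ypatybės:': 'Balkonas'},
]

with open('demo.csv', 'w', encoding='utf-8', newline='') as f_output:
    # fieldnames come only from the first item, so the 'Ypatybės:' column
    # is never created, and extrasaction='ignore' silently drops its values
    csv_output = csv.DictWriter(f_output, fieldnames=data[0].keys(), extrasaction='ignore')
    csv_output.writeheader()
    csv_output.writerows(data)

The resulting demo.csv contains only the city and full_price columns.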

> Solution:

To store the data in a CSV file you can use a pandas DataFrame:

pd.DataFrame(data).to_csv('output_kaunas.csv', index=False)
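
pandas builds the DataFrame from the union of the keys of all the dicts in data and fills the missing entries with NaN, so every feature gets its own column even when the first scraped item lacks it.

If you prefer to stay with the csv module, here is a minimal sketch of the same idea: collect the field names from all items instead of only data[0], and let restval fill the gaps.

import csv

# Union of keys across all scraped items, keeping first-seen order
fieldnames = list(dict.fromkeys(key for row in data for key in row))

with open('output_kaunas.csv', 'w', encoding='utf-8', newline='') as f_output:
    # restval supplies the value for any field a given row is missing
    csv_output = csv.DictWriter(f_output, fieldnames=fieldnames, restval='NaN')
    csv_output.writeheader()
    csv_output.writerows(data)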

Applied to your full code:

from selenium import webdriver
from bs4 import BeautifulSoup
import re
import pandas as pd
import time


PATH = r'C:\Program Files (x86)\chromedriver.exe'  # raw string so the backslashes are kept literally
driver = webdriver.Chrome(PATH)
data = []


def get_dl(soup):
    d_list = {}

    for dl in soup.find_all("dl", {"class": "obj-details"}):
        for el in dl.find_all(["dt", "dd"]):
            if el.name == 'dt':
                key = el.get_text(strip=True)
            elif key in ['Plotas:', 'Buto numeris:', 'Metai:', 'Namo numeris:', 'Kambarių sk.:', 'Aukštas:', 'Aukštų sk.:', 'Pastato tipas:', 'Šildymas:', 'Įrengimas:', 'Pastato energijos suvartojimo klasė:', 'Ypatybės:', 'Papildomos patalpos:', 'Papildoma įranga:', 'Apsauga:']:
                d_list[key] = ' '.join(el.text.strip().replace("\n", ", ").split('NAUDINGA')[0].split('m²')[0].split())
    return d_list

for puslapis in range(1, 2):
    driver.get(f'https://www.aruodas.lt/butai/kaune/puslapis/{puslapis}')
    response = driver.page_source
    soup = BeautifulSoup(response, 'html.parser')
    blocks = soup.find_all('tr', class_='list-row')
    stored_urls = []

    for url in blocks:
        try:
            stored_urls.append(url.a['href'])
        except:
            pass

    for link in stored_urls:
        driver.get(link)
        response = driver.page_source
        soup = BeautifulSoup(response, 'html.parser')
        h1 = soup.find('h1', 'obj-header-text')
        price = soup.find('div', class_ = 'price-left')

        try:
            address1 = h1.get_text(strip=True)
            address2 = re.findall(r'(.*),[^,]*$', address1)
            address = ''.join(address2)
            city, district, street = address.split(',')
        except:
            city = district = street = 'NaN'  # unpacking 'NaN' into three names would give 'N', 'a', 'N'

        try:
            full_price = price.find('span', class_ = 'price-eur').text.strip()
            full_price1 = full_price.replace('€', '').replace(' ','').strip()
        except:
            full_price1 = 'NaN'

        try:
            price_sq_m = price.find('span', class_ = 'price-per').text.strip()
            price_sq_m1 = price_sq_m.replace('€/m²)', '').replace('(domina keitimas)', '').replace('(', '').replace(' ','').strip()
        except:
            price_sq_m1 = 'NaN'

        try:
            price_change = price.find('div', class_ = 'price-change').text.strip()
            price_change1 = price_change.replace('%', '').strip()
        except:
            price_change1 = 'NaN'

        data.append({'city': city, 'district': district, 'street': street, 'full_price': full_price1, 'price_sq_m': price_sq_m1, 'price_change': price_change1, **get_dl(soup)})


pd.DataFrame(data).to_csv('output_kaunas.csv', index=False)
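
A quick way to confirm that every feature ended up as its own column is to read the file back; flats that lack a feature simply hold NaN in that column:

import pandas as pd

df = pd.read_csv('output_kaunas.csv')
print(df.isna().sum())  # per-column count of missing values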