From 7c72b004b7f72f294f78864b7e5aa06f12059aca Mon Sep 17 00:00:00 2001
From: Prannesh SathyaMoorthy <cb.en.u4cse16231@cb.students.amrita.edu>
Date: Wed, 6 Mar 2019 13:08:04 +0530
Subject: [PATCH] Upload New File

---
 cse16231_p2_21.py | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 cse16231_p2_21.py

diff --git a/cse16231_p2_21.py b/cse16231_p2_21.py
new file mode 100644
index 0000000..66396b7
--- /dev/null
+++ b/cse16231_p2_21.py
@@ -0,0 +1,38 @@
+"""Scrape top technology headlines from Google News and Yahoo Finance,
+print them, and compare the word sets of the last headline of each."""
+from bs4 import BeautifulSoup
+import requests
+
+
+def fetch_headlines(url, limit=10):
+    """Return the text of up to `limit` <h3> tags found at `url`."""
+    response = requests.get(url, timeout=25)
+    soup = BeautifulSoup(response.content, "html.parser")
+    # Call find_all once and slice: the original indexed [i] inside a
+    # range(10) loop, which re-parsed per iteration and raised
+    # IndexError whenever fewer than 10 <h3> tags were present.
+    return [tag.get_text() for tag in soup.find_all('h3')[:limit]]
+
+
+google_url = ("https://www.google.com/search?q=top+10+technology"
+              "&source=lnms&tbm=nws&sa=X"
+              "&ved=0ahUKEwjrn8v15uzgAhWJfn0KHQPSBqAQ_AUIDygC"
+              "&biw=1301&bih=670")
+print("Google")
+google_headlines = fetch_headlines(google_url)
+for headline in google_headlines:
+    print(headline)
+
+print("Yahoo")
+yahoo_headlines = fetch_headlines("https://finance.yahoo.com/tech/")
+for headline in yahoo_headlines:
+    print(headline)
+
+# Word sets of the LAST headline from each source (empty if no results).
+set1 = set(google_headlines[-1].split(' ')) if google_headlines else set()
+set2 = set(yahoo_headlines[-1].split(' ')) if yahoo_headlines else set()
+print(set1 != set2)
+print(set1, "==", set2)
+
+# Membership test, not `==`: a set never compares equal to a plain string.
+print('Samsung' in set2)
+print(set2, "==", set1)
-- 
GitLab