M. Sc (Information Technology) Applied Artificial Intelligence
Sr.No. | Name | README | DOWNLOAD |
---|---|---|---|
Prac1A Prac1B | 1A. Design An Expert System using AIML. 1B. Design An Expert System using AIML. | Prac1A Prac1B | Download Download |
Prac2 | Design a Chatbot using AIML | Prac2 | |
Prac3A Prac3B | 3A. Implement Bayes Theorem using Python. 3B. Bayes Theorem. | Prac3A Prac3B | Download Download |
Prac4A Prac4B | 4A. Write an application to implement DFS algorithm. 4B. Write an application to implement BFS algorithm. | Prac4A Prac4B | Download Download |
Prac5A Prac5B | 5A. Rule Based System. 5B. Rule Based System. | Prac5A Prac5B | |
Prac6A Prac6B | 6A. Design a Fuzzy based operations using Python / R. 6B. Design a Fuzzy based application using Python / R. | Prac6A Prac6B | Download Download |
Prac7A Prac7B | 7A. Implement joint probability using Python. 7B. Implement Conditional Probability using Python. | Prac7A Prac7B | Download Download |
Prac8A Prac8B | 8A. Write an application to implement clustering algorithm. 8B. Write an application to implement clustering algorithm. | Prac8A Prac8B | Download Download |
Prac9 | 9. SUPERVISED LEARNING METHODS USING PYTHON | Prac9 | Download |
Prac10 | 10. Design an Artificial Intelligence application to implement intelligent agents. | Prac10 | Download |
Prac11 | 11. Design an application to simulate language parser. | Prac11 | Download |
Design An Expert System using AIML.
CODE
# Design An Expert System using AIML
# An Expert system for responding the patient query for identifying the flu.
# Create an empty list to store information
info = []
# Input the user's name and add it to the 'info' list
name = input("Enter Your name: ")
info.append(name)
# Input the user's age as an integer and add it to the 'info' list
age = int(input("Enter Your age: "))
info.append(age)
# Lists of common symptoms for Malaria and Diabetes
a = ["Fever", "Headache", "Tiredness", "Vomiting"]
b = ["Urinate A Lot", "Feels Thirsty", "Weight Loss", "Blurry Vision", "Feels Very Hungry", "Feels Very Tired"]
# Print the lists of symptoms
print("Common Symptoms for Malaria:", a)
print("Common Symptoms for Diabetes:", b)
# Input symptoms separated by a comma and split them into a list
symp = input("Enter Symptoms As Above Separated By Comma: ")
lst = symp.split(",")
# Print the user's information
print("User Information:")
print("Name:", info[0])
print("Age:", info[1])
print("Symptoms:")
# Loop through the list of symptoms and print each one
for symptom in lst:
    print(symptom.strip())
# Check if any symptom matches the symptoms for Malaria or Diabetes
for symptom in lst:
    if symptom.strip() in a:
        print("You May Have Malaria")
        print("Please Visit A Doctor")
        break
    elif symptom.strip() in b:
        print("You May Have Diabetes")
        print("Consider Reducing Sugar Intake")
        break
else:
    # The for/else branch runs only when no symptom matched either list
    print("Symptoms Do Not Match Common Health Conditions")
Design An Expert System using AIML (second version).
CODE
# Design An Expert System using AIML
# Input user's name
name = input("Enter your name: ")
# Input whether the user has a fever, cough, shortness of breath, sore throat, muscle pain, and headache (Y/N)
fever = input("DO YOU HAVE fever (Y/N)").lower()
cough = input("DO YOU HAVE cough (Y/N)").lower()
sob = input("DO YOU HAVE shortness of breath (Y/N)").lower()
st = input("DO YOU HAVE sore throat (Y/N)").lower()
mp = input("DO YOU HAVE muscle pain (Y/N)").lower()
hc = input("DO YOU HAVE headache(Y/N)").lower()
# Input whether the user has diarrhea, conjunctivitis, loss of taste, chest pain or pressure, and loss of speech or movement (Y/N)
diarrhoea = input("DO YOU HAVE diarrhea (Y/N)").lower()
conjunctivitis = input("DO YOU HAVE conjunctivitis (Y/N)").lower()
lot = input("DO YOU HAVE Loss OF taste (Y/N)").lower()
cp = input("DO YOU HAVE chest pain or pressure (Y/N)").lower()
lsp = input("DO YOU HAVE Loss Of Speech or movement (Y/N)").lower()
# Check for different conditions based on symptoms
if fever == "y" and cough == "y" and sob == "y" and st == "y" and mp == "y" and hc == "y":
    print(name + " YOU HAVE FLU")
    med = input("Sir/Ma'am would you like to look at some medicine for flu (Y/N)").lower()
    if med == "y":
        print("Disclaimer: Contact a doctor for better guidance")
        print("There are four FDA-approved antiviral drugs recommended by CDC to treat flu this season")
        print("1. Oseltamivir phosphate")
        print("2. Zanamivir")
        print("3. Peramivir")
        print("4. Baloxavir marboxil")
elif diarrhoea == "y" and st == "y" and fever == "y" and cough == "y" and conjunctivitis == "y" and lot == "y":
    print(name + " YOU HAVE CORONA")
    med = input("Sir/Ma'am would you like to look at some remedies for Corona (Y/N)").lower()
    if med == "y":
        print("TAKE VACCINE AND QUARANTINE")
elif fever == "y" and cough == "y":
    print(name + " YOU HAVE Common Cold")
    med = input("Sir/Ma'am would you like to look at some remedies for common cold (Y/N)").lower()
    if med == "y":
        print("Disclaimer: Contact a doctor for better guidance")
        print("Treatment consists of anti-inflammatories and decongestants")
        print("Most people recover on their own")
        print("1. Nonsteroidal anti-inflammatory drug")
        print("2. Analgesic")
        print("3. Antihistamine")
        print("4. Cough medicine")
        print("5. Decongestant")
else:
    print("Unable to identify")
Design a Chatbot using AIML
CODE
<aiml version="1.0.1" encoding="UTF-8">
<!-- std-startup.xml -->
<!-- Category is an atomic AIML unit -->
<category>
<!-- Pattern to match in user input -->
<!-- If user enters "LOAD AIML B" -->
<pattern>LOAD AIML B</pattern>
<!-- Template is the response to the pattern -->
<!-- The learn tag loads an AIML file -->
<template>
<learn>basic_chat.aiml</learn>
<!-- You can add more aiml files here -->
<!--<learn>more_aiml.aiml</learn>-->
</template>
</category>
</aiml>
<aiml version="1.0.1" encoding="UTF-8">
<!-- basic_chat.aiml -->
<category>
<pattern>HELLO *</pattern>
<template>
Well, Hello Ninad!
</template>
</category>
<category>
<pattern>WHAT ARE YOU</pattern>
<template>
I'm a bot, and I'm silly!
</template>
</category>
<category>
<pattern>WHAT DO YOU DO</pattern>
<template>
I'm here to motivate you!
</template>
</category>
<category>
<pattern>WHO AM I</pattern>
<template>
You are a Professional Footballer....
</template>
</category>
</aiml>
#pip install aiml
#pip install python-aiml
import aiml
kernel = aiml.Kernel()
kernel.learn("std-startup.xml")
kernel.respond("load aiml b")
while True:
    input_text = input(">Human: ")
    response = kernel.respond(input_text)
    print(">Bot: " + response)
Implement Bayes Theorem using Python.
CODE
def bayes_theorem(p_h, p_e_given_h, p_e_given_not_h):
    p_not_h = 1 - p_h
    p_e = (p_e_given_h * p_h) + (p_e_given_not_h * p_not_h)
    p_h_given_e = (p_e_given_h * p_h) / p_e
    return p_h_given_e
p_h = float(input("Enter the probability of NK having a cold: "))
p_e_given_h = float(
input("Enter the probability of observing sneezing when NK has a cold: ")
)
p_e_given_not_h = float(
input(
"Enter the probability of observing sneezing when NK does not have a cold: "
)
)
result = bayes_theorem(p_h, p_e_given_h, p_e_given_not_h)
print(
"NK's probability of having a cold given that he sneezes (P(H|E)) is:",
round(result, 2),
)
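As a quick sanity check of the function (with hypothetical inputs, not values from the practical): if P(H) = 0.2, P(E|H) = 0.9 and P(E|not H) = 0.05, then P(E) = 0.9*0.2 + 0.05*0.8 = 0.22 and P(H|E) = 0.18 / 0.22 ≈ 0.82.
# Hypothetical sanity check; the numbers are chosen only for illustration
assert round(bayes_theorem(0.2, 0.9, 0.05), 2) == 0.82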
Implement Bayes Theorem using Python.
CODE
def drug_user(prob_th=0.5, sensitivity=0.97, specificity=0.95, prevalence=0.005, verbose=True):
    # Bayes' rule: P(user | positive test) = P(positive | user) * P(user) / P(positive)
    p_user = prevalence
    p_non_user = 1 - prevalence
    p_pos_user = sensitivity
    p_neg_user = specificity
    p_pos_non_user = 1 - specificity
    num = p_pos_user * p_user
    den = p_pos_user * p_user + p_pos_non_user * p_non_user
    prob = num / den
    print("Probability of NK being a drug user is", round(prob, 3))
    if verbose:
        if prob > prob_th:
            print("NK could be a user")
        else:
            print("NK may not be a user")
    return prob
drug_user()
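With the default arguments, the posterior works out to 0.97*0.005 / (0.97*0.005 + 0.05*0.995) ≈ 0.089, so the script reports that NK may not be a user.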
Write an application to implement DFS algorithm.
CODE
graph = {"5": ["3", "7"], "3": ["2", "4"], "7": ["8"], "2": [], "4": ["8"], "8": []}
visited = [] # List for visited nodes.
queue = [] # Initialize a queue
def bfs(visited, graph, node): # function for BFS
visited.append(node)
queue.append(node)
while queue: # Creating loop to visit each node
m = queue.pop(0)
print(m, end=" ")
for neighbour in graph[m]:
if neighbour not in visited:
visited.append(neighbour)
queue.append(neighbour)
# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, "5") # function calling
Write an application to implement BFS algorithm.
CODE
# Using a Python dictionary to act as an adjacency list
graph = {"5": ["3", "7"], "3": ["2", "4"], "7": ["8"], "2": [], "4": ["8"], "8": []}
visited = []  # List for visited nodes
queue = []  # Initialize a queue
def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # Visit nodes in FIFO order
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, "5")  # function calling
Rule Based System.
CODE
/* https://swish.swi-prolog.org/ */
male(vijay).
male(mahadev).
male(gaurihar).
male(omkar).
male(bajrang).
male(chaitanya).
female(vasanti).
female(indubai).
female(ashwini).
female(gayatri).
female(sangita).
parent(vijay,chaitanya).
parent(vasanti,chaitanya).
parent(vijay,gaurihar).
parent(vasanti,gaurihar).
parent(vijay,ashwini).
parent(vasanti,ashwini).
parent(mahadev,vijay).
parent(indubai,vijay).
mother(X,Y):-parent(X,Y),female(X).
father(X,Y):- parent(X,Y), male(X).
grandmother(GM,X):- mother(GM,Y) ,parent(Y,X).
grandfather(GF,X):- father(GF,Y) ,parent(Y,X).
greatgrandmother(GGM,X):- mother(GGM,GP), parent(GP,P), parent(P,X).
greatgrandfather(GGF,X):- father(GGF,GP), parent(GP,P), parent(P,X).
sibling(X,Y):-mother(M,X), mother(M,Y),X\=Y, father(F,X), father(F,Y).
brother(X,Y):-sibling(X,Y), male(X).
sister(X,Y):-sibling(X,Y), female(X).
uncle(U,X):- parent(Y,X), brother(U,Y).
aunt(A,X):- parent(Y,X), sister(A,Y).
nephew(N,X):- sibling(S,X),parent(S,N),male(N).
niece(N,X):-sibling(S,X), parent(S,N), female(N).
cousin(X,Y):-parent(P,Y),sibling(S,P),parent(S,X).
-----------------------------------------------------
Query
father(X,Y).
mother(X,Y).
Rule Based System.
CODE
/* https://swish.swi-prolog.org/ */
/* Facts */
male(jack).
male(oliver).
male(ali).
male(james).
male(simon).
male(harry).
female(helen).
female(sophie).
female(jess).
female(lily).
parent_of(jack, jess).
parent_of(jack, lily).
parent_of(helen, jess).
parent_of(helen, lily).
parent_of(oliver, james).
parent_of(sophie, james).
parent_of(jess, simon).
parent_of(ali, simon).
parent_of(lily, harry).
parent_of(james, harry).
/* Rules */
father_of(X, Y):- male(X), parent_of(X, Y).
mother_of(X, Y):- female(X), parent_of(X, Y).
grandfather_of(X, Y):- male(X), parent_of(X, Z), parent_of(Z, Y).
grandmother_of(X, Y):- female(X), parent_of(X, Z), parent_of(Z, Y).
sister_of(X, Y):- female(X), father_of(F, Y), father_of(F, X), X \= Y.
sister_of(X, Y):- female(X), mother_of(M, Y), mother_of(M, X), X \= Y.
aunt_of(X, Y):- female(X), parent_of(Z, Y), sister_of(Z, X), !.
brother_of(X, Y):- male(X), father_of(F, Y), father_of(F, X), X \= Y.
brother_of(X, Y):- male(X), mother_of(M, Y), mother_of(M, X), X \= Y.
uncle_of(X, Y):- parent_of(Z, Y), brother_of(Z, X).
ancestor_of(X, Y):- parent_of(X, Y).
ancestor_of(X, Y):- parent_of(X, Z), ancestor_of(Z, Y).
-----------------------------------------------------
Query
father_of(X,Y).
mother_of(X,Y).
Design a Fuzzy based operations using Python / R.
CODE
# AAI 6A) AIM: Design a Fuzzy based operations using Python / R.
# Initialize the dictionaries for fuzzy sets A and B, and an empty dict for the results
A = {"a": 0.2, "b": 0.3, "c": 0.6, "d": 0.6}
B = {"a": 0.9, "b": 0.9, "c": 0.4, "d": 0.5}
result = {}
# Display the fuzzy sets A and B
print('The First Fuzzy Set is:', A)
print('The Second Fuzzy Set is:', B)
# Fuzzy Set Union
for i in A:
    if A[i] > B[i]:
        result[i] = A[i]
    else:
        result[i] = B[i]
print("Union of two sets is", result)
# Fuzzy Set Intersection
result = {}
for i in A:
    if A[i] < B[i]:
        result[i] = A[i]
    else:
        result[i] = B[i]
print("Intersection of two sets is", result)
# Fuzzy Set Complement
result = {}
for i in A:
    result[i] = round(1 - A[i], 2)
print("Complement of First set is", result)
# Fuzzy Set Difference
result = {}
for i in A:
    result[i] = round(min(A[i], 1 - B[i]), 2)
print("Difference of two sets is", result)
Design a Fuzzy based application using Python / R.
CODE
# AAI 6B: AIM: Design a Fuzzy based application using Python / R.
# !pip install fuzzywuzzy
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
s1 = "I love GeeksforGeeks"
s2 = "I am loving GeeksforGeeks"
print("FuzzyWuzzy Ratio: ", fuzz.ratio(s1, s2))
print("FuzzyWuzzy PartialRatio: ", fuzz.partial_ratio(s1, s2))
print("FuzzyWuzzy TokenSortRatio: ", fuzz.token_sort_ratio(s1, s2))
print("FuzzyWuzzy TokenSetRatio: ", fuzz.token_set_ratio(s1, s2))
print("FuzzyWuzzy WRatio: ", fuzz.WRatio(s1, s2), "\n\n")
# for process library,
query = "geeks for geeks"
choices = ["geek for geek", "geek geek", "g. for geeks"]
print("List of ratios: ")
print(process.extract(query, choices), "\n")
print("Best among the above list: ", process.extractOne(query, choices))
Implement joint probability using Python.
CODE
### 7a) AIM: Implement joint probability using Python.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
sns.set()
# Read the dataset
data = pd.read_csv("student-mat.csv")
# Create a joint plot
sns.jointplot(data=data, x="G3", y="absences", kind="kde")
# Display the plot
plt.show()
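The KDE joint plot only visualizes the joint density. To obtain actual joint probabilities, one option is to bucket the two variables and normalize the cross-tabulation; the thresholds below (pass mark of 10, ten or more absences) are illustrative assumptions, not part of the original practical:
# Empirical joint probability table over two illustrative buckets
passed = data["G3"] >= 10
high_abs = data["absences"] >= 10
joint = pd.crosstab(passed, high_abs, normalize=True)  # each cell = P(passed, high_abs)
print(joint)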
Implement Conditional probability using Python.
CODE
### 7b) AIM: Implement Conditional Probability using Python.
import pandas as pd
import numpy as np
df = pd.read_csv("student-mat.csv")
df.head(3)
len(df)
df["grade_A"] = np.where(df["G3"] * 5 >= 80, 1, 0)
df["high_absenses"] = np.where(df["absences"] >= 10, 1, 0)
df["count"] = 1
df = df[["grade_A", "high_absenses", "count"]]
df.head()
pd.pivot_table(
df,
values="count",
index=["grade_A"],
columns=["high_absenses"],
aggfunc=np.size,
fill_value=0,
)
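From the same dataframe, P(grade_A = 1 | high_absenses = 1) can be computed directly; a short sketch continuing from the columns built above:
# P(A|B) = P(A and B) / P(B), estimated from the data
p_joint = len(df[(df["grade_A"] == 1) & (df["high_absenses"] == 1)]) / len(df)
p_high_abs = len(df[df["high_absenses"] == 1]) / len(df)
print("P(grade_A | high_absenses) =", round(p_joint / p_high_abs, 3))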
Write an application to implement clustering algorithm.
CODE
## AAI_prac8A_clustering
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.cluster.hierarchy as shc
from sklearn.cluster import AgglomerativeClustering
# Read the customer data from a CSV file
customer_data = pd.read_csv("Mall_Customers.csv")
# Display the shape and the first few rows of the data
print(customer_data.shape)
customer_data.head()
# Extract the relevant columns from the data
data = customer_data.iloc[:, 3:5].values
# Create a dendrogram plot
plt.figure(figsize=(10, 7))
plt.title("Customer Dendrograms")
dend = shc.dendrogram(shc.linkage(data, method="ward"))
# Perform hierarchical clustering
# Euclidean distance is required for ward linkage and is the default metric
# (the old `affinity` keyword was removed in newer scikit-learn releases)
cluster = AgglomerativeClustering(n_clusters=5, linkage="ward")
cluster_labels = cluster.fit_predict(data)
# Create a scatter plot to visualize the clusters
plt.figure(figsize=(10, 7))
plt.scatter(data[:, 0], data[:, 1], c=cluster_labels, cmap='rainbow')
plt.show()
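To put a number on how well the five clusters separate, scikit-learn's silhouette score can be used; this is an optional addition, not part of the original practical:
from sklearn.metrics import silhouette_score
# Values closer to 1 indicate well-separated clusters
print("Silhouette score:", round(silhouette_score(data, cluster_labels), 3))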
Write an application to implement clustering algorithm.
CODE
## AAI_prac8B_SyntheticClassification
from numpy import where
from sklearn.datasets import make_classification
from matplotlib import pyplot
x, y = make_classification(
n_samples=1000,
n_features=2,
n_informative=2,
n_redundant=0,
n_clusters_per_class=1,
random_state=4,
)
for class_value in range(2):
    row_ix = where(y == class_value)
    pyplot.scatter(x[row_ix, 0], x[row_ix, 1])
pyplot.show()
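Since the practical list labels 8B as a clustering exercise, the generated points can also be clustered while ignoring the true labels; a minimal k-means sketch (an addition, the original code only plots the classes):
from sklearn.cluster import KMeans
# Cluster the same points without using y
kmeans_labels = KMeans(n_clusters=2, n_init=10, random_state=4).fit_predict(x)
pyplot.scatter(x[:, 0], x[:, 1], c=kmeans_labels)
pyplot.show()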
SUPERVISED LEARNING METHODS USING PYTHON
CODE
### Step 1: First we import pandas and numpy. Pandas is used here for table (DataFrame) manipulation. Using pandas, we load the Titanic training dataset and then look at the first five rows with the head() function.
import pandas as pd
import numpy as np
titanic= pd.read_csv("train.csv")
titanic.head()
### Step 2: Create Two Data Frames, one containing categories and one containing numbers
titanic_cat = titanic.select_dtypes(object)
titanic_num = titanic.select_dtypes(np.number)
### Step 3: Now we need to drop two columns (Name and Ticket)
titanic_cat.head()
titanic_num.head()
titanic_cat.drop(['Name','Ticket'], axis=1, inplace=True)
### Step 4: Now we check how many null values are present in these columns
titanic_cat.isnull().sum()
### Step 5: Replace all the null values with the most frequent category in each column
titanic_cat.Cabin.fillna(titanic_cat.Cabin.value_counts().idxmax(), inplace=True)
titanic_cat.Embarked.fillna(titanic_cat.Embarked.value_counts().idxmax(), inplace=True)
### Step 6: After filling in all the null values, our categorical data set is ready.
titanic_cat.head(20)
### Step 7: The next step is to replace all the categories with numerical labels. For that we will use scikit-learn's LabelEncoder.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
titanic_cat = titanic_cat.apply(le.fit_transform)
titanic_cat.head()
titanic_num.isna().sum()
### Step 8: Only one numeric column still contains null values (Age). Let's replace them with the mean.
titanic_num.Age.fillna(titanic_num.Age.mean(), inplace=True)
titanic_num.isna().sum()
### Step 9: Now we remove unnecessary columns; since PassengerId carries no useful information for prediction, we drop it
titanic_num.drop(['PassengerId'], axis=1, inplace=True)
titanic_num.head()
### Step 10: Now we will combine the two data frames into one
titanic_final = pd.concat([titanic_cat,titanic_num],axis=1)
titanic_final.head()
### Step 11: Now we will define dependent and independent variables
X=titanic_final.drop(['Survived'],axis=1)
Y= titanic_final['Survived']
### Step 12: Now we take the first 80% of the data as our training set and the remaining 20% as our test set.
X_train = np.array(X[0:int(0.80*len(X))])
Y_train = np.array(Y[0:int(0.80*len(Y))])
X_test = np.array(X[int(0.80*len(X)):])
Y_test = np.array(Y[int(0.80*len(Y)):])
len(X_train), len(Y_train), len(X_test), len(Y_test)
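This slice keeps the original row order; an equivalent and more common approach is scikit-learn's train_test_split, shown here as an optional alternative with shuffling disabled to mimic the sequential split above:
from sklearn.model_selection import train_test_split
# shuffle=False reproduces the sequential 80/20 split used above
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, shuffle=False)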
### Step 13: Now we will import all the algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
### Step 14: Now we will initialize them in respective variables
LR = LogisticRegression()
KNN = KNeighborsClassifier()
NB = GaussianNB()
LSVM = LinearSVC()
NLSVM = SVC(kernel='rbf')
DT = DecisionTreeClassifier()
RF = RandomForestClassifier()
### Step 15: Now we will train our model
LR_fit = LR.fit(X_train, Y_train)
KNN_fit = KNN.fit(X_train, Y_train)
NB_fit = NB.fit(X_train, Y_train)
LSVM_fit = LSVM.fit(X_train, Y_train)
NLSVM_fit = NLSVM.fit(X_train, Y_train)
DT_fit = DT.fit(X_train, Y_train)
RF_fit = RF.fit(X_train, Y_train)
### Step 16: Now we predict on the test set and compare the accuracy scores
LR_pred = LR_fit.predict(X_test)
KNN_pred = KNN_fit.predict(X_test)
NB_pred = NB_fit.predict(X_test)
LSVM_pred = LSVM_fit.predict(X_test)
NLSVM_pred = NLSVM_fit.predict(X_test)
DT_pred = DT_fit.predict(X_test)
RF_pred = RF_fit.predict(X_test)
from sklearn.metrics import accuracy_score
print("Logistic Regression is %f percent accurate" % (accuracy_score(LR_pred, Y_test)*100))
print("KNN is %f percent accurate" % (accuracy_score(KNN_pred, Y_test)*100))
print("Naive Bayes is %f percent accurate" % (accuracy_score(NB_pred, Y_test)*100))
print("Linear SVMs is %f percent accurate" % (accuracy_score(LSVM_pred, Y_test)*100))
print("Non Linear SVMs is %f percent accurate" % (accuracy_score(NLSVM_pred, Y_test)*100))
print("Decision Trees is %f percent accurate" % (accuracy_score(DT_pred, Y_test)*100))
print("Random Forests is %f percent accurate" % (accuracy_score(RF_pred, Y_test)*100))
Design an Artificial Intelligence application to implement intelligent agents.
CODE
#10A-Design an Artificial Intelligence application to implement intelligent agents.
class ClothesAgent:
    def __init__(self):
        self.weather = None

    def get_weather(self):
        # Simulating weather conditions (you can modify this as needed)
        self.weather = input("Enter the weather (sunny, rainy, windy, snowy): ").lower()

    def suggest_clothes(self):
        if self.weather == "sunny":
            print("It's sunny outside. You should wear light clothes, sunglasses, and sunscreen.")
        elif self.weather == "rainy":
            print("It's rainy outside. Don't forget an umbrella, raincoat, and waterproof shoes.")
        elif self.weather == "windy":
            print("It's windy outside. Wear layers and a jacket to stay warm.")
        elif self.weather == "snowy":
            print("It's snowy outside. Dress warmly with a heavy coat, gloves, and boots.")
        else:
            print("Sorry, I don't understand the weather condition. Please enter sunny, rainy, windy, or snowy.")

def main():
    agent = ClothesAgent()
    agent.get_weather()
    agent.suggest_clothes()

if __name__ == "__main__":
    main()
Design an application to simulate language parser.
CODE
#11-Design an application to simulate language parser.
def sentenceSegment(text):
    sentences = []
    start = 0
    for i in range(len(text)):
        if text[i] == "." or text[i] == "!" or text[i] == "?":
            sentences.append(text[start : i + 1].strip())
            start = i + 1
    return sentences
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
print(sentenceSegment(text))
#%pip install nltk
import nltk
nltk.download("punkt")
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
sentences = nltk.sent_tokenize(text)
print(sentences)
import string
def remove_punctuation(input_string):
    # Define a string of punctuation marks and symbols
    punctuations = string.punctuation
    # Remove the punctuation marks and symbols from the input string
    output_string = "".join(char for char in input_string if char not in punctuations)
    return output_string
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
sentences = sentenceSegment(text)
puncRemovedText = remove_punctuation(text)
print(puncRemovedText)
def convertToLower(s):
    return s.lower()
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
puncRemovedText = remove_punctuation(text)
lowerText = convertToLower(puncRemovedText)
print(lowerText)
# in this code, we are not using any libraries
# tokenize without using any function from string or any other function.
# only using loops and if/else
def tokenize(s):
    words = []  # token words should be stored here
    i = 0
    word = ""
    while i < len(s):
        if s[i] != " ":
            word = word + s[i]
        else:
            words.append(word)
            word = ""
        i = i + 1
    words.append(word)
    return words
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
puncRemovedText = remove_punctuation(text)
lowerText = convertToLower(puncRemovedText)
tokenizedText = tokenize(lowerText)
print(tokenizedText)
import nltk
# Define input text
text = "Hello, NLP world!! In this example, we are going to do the basics of Text processing which will be used later."
# sentence segmentation - removal of punctuations and converting to lowercase
sentences = nltk.sent_tokenize(text)
puncRemovedText = remove_punctuation(text)
lowerText = convertToLower(puncRemovedText)
# Tokenize the text
tokens = nltk.word_tokenize(lowerText)
# Print the tokens
print(tokens)
import nltk
sentence = "We're going to John's house today."
tokens = nltk.word_tokenize(sentence)
print(tokens)