FAM Internal Practicals
while queue:
queue.append(neighbor)
return visit_order
# Example usage
if __name__ == "__main__":
graph = {
'D': ['B'],
start_node = 'A'
Output:
BFS traversal starting from node 'A': ['A', 'B', 'C', 'D', 'E', 'F']
Program:
if visited is None:
if visit_order is None:
return visit_order
# Example usage
if __name__ == "__main__":
graph = {
'D': ['B'],
start_node = 'A'
Output:
DFS traversal starting from node 'A': ['A', 'B', 'D', 'E', 'F', 'C']
Program:
import heapq
class Node:
priority_queue = []
visited = set()
heapq.heappush(priority_queue, start_node)
while priority_queue:
current_node = heapq.heappop(priority_queue)
if current_node.name == goal:
path = []
while current_node:
path.append(current_node.name)
current_node = current_node.parent
# Explore neighbors
heapq.heappush(priority_queue, neighbor_node)
# Example usage
if __name__ == "__main__":
graph = {
'D': ['B'],
heuristics = {
'A': 6,
'B': 4,
'C': 2,
'D': 1,
'E': 3,
start_node = 'A'
goal_node = 'F'
print("Greedy Best-First Search path from '{}' to '{}': {}".format(start_node, goal_node, result))
Output:
Greedy Best-First Search path from 'A' to 'F': ['A', 'C', 'F']
Program:
import heapq
class Node:
priority_queue = []
visited = set()
# Initialize the start node
heapq.heappush(priority_queue, start_node)
while priority_queue:
current_node = heapq.heappop(priority_queue)
if current_node.name == goal:
path = []
while current_node:
path.append(current_node.name)
current_node = current_node.parent
visited.add(current_node.name)
# Explore neighbors
heapq.heappush(priority_queue, neighbor_node)
# Example usage
if __name__ == "__main__":
graph = {
heuristics = {
'A': 7,
'B': 6,
'C': 2,
'D': 1,
'E': 3,
start_node = 'A'
goal_node = 'F'
Output:
A* Search path from 'A' to 'F': ['A', 'C', 'F']
Program:
confusion_matrix
df = pd.read_csv('/content/drive/MyDrive/creditcard.csv')
print(df.head())
# Preprocessing
# Class column: 0 indicates non-fraud, 1 indicates fraud
X = df.drop(columns=['Class']) # Features
y = df['Class'] # Target
random_state=42, stratify=y)
clf.fit(X_train, y_train)
# Make predictions
y_pred = clf.predict(X_test)
Output:
# Sample data: ten numeric points with one label letter each
data = list(range(1, 11))
labels = [chr(ord('A') + offset) for offset in range(10)]
# Split data into training and testing sets (80% train, 20% test)
Output:
# Sample data: values 1-10 paired with labels 'A'-'J'
data = [index + 1 for index in range(10)]
labels = list('ABCDEFGHIJ')
# Split data into training and testing sets (80% train, 20% test)
random_state= 42 )
# Annotate the two comparison subplots: shared x-label, per-metric
# y-label, and a fixed [0, 1] y-range so scores are visually comparable.
for subplot_idx, metric_name in ((0, 'Accuracy'), (1, 'Precision Score')):
    axes[subplot_idx].set_xlabel('Classifier')
    axes[subplot_idx].set_ylabel(metric_name)
    axes[subplot_idx].set_ylim(0, 1)
plt.tight_layout()
plt.show()
Output:
Program:
import numpy as np
import matplotlib.pyplot as plt
# Let's say we have data on house size (in square feet) and price (in thousands of dollars)
# Fit an ordinary least-squares model of house price as a function of size.
# NOTE(review): assumes house_size is a 2-D (n_samples, 1) array as required
# by sklearn's fit/predict — the reshaping lines were lost in extraction; confirm.
model = LinearRegression()
model.fit(house_size, house_price)
# Make predictions
# Predict on the training inputs themselves (used for the fitted-line plot).
price_pred = model.predict(house_size)
plt.show()
Output:
Coefficients: [0.23062016]
Intercept: 5.426356589147133
# Toy student-performance dataset: three feature columns and the target
# test score, ten students each.
# Fix: the closing brace of this dict literal was missing in the source
# (extraction damage), making it a syntax error; restored so it parses.
data = {
    'study_hours': [2, 3, 5, 7, 8, 4, 6, 2, 9, 5],
    'sleep_hours': [8, 7, 6, 8, 7, 9, 7, 6, 8, 9],
    'previous_score': [70, 75, 80, 85, 90, 72, 88, 65, 92, 78],
    'test_score': [75, 80, 85, 90, 95, 78, 92, 70, 98, 83],
}
df = pd.DataFrame(data)
# Target variable: the final test score.
y = df[ 'test_score' ]
# NOTE(review): X and the X_train/X_test, y_train/y_test split are built on
# lines lost in extraction (feature selection + train_test_split) — restore upstream.
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions
y_pred = model.predict(X_test)
# Predict for one unseen student.
# NOTE(review): feature order assumed to be [study_hours, sleep_hours,
# previous_score], matching the dataset columns — confirm against X.
new_student = [[ 8 , 5 , 92 ]]
predicted_score = model.predict(new_student)