FAM Internal Practicals

Program:-

from collections import deque

def bfs(graph, start):
    visited = set()         # Set to keep track of visited nodes
    queue = deque([start])  # Initialize a queue with the starting node
    visit_order = []        # List to record the order of visited nodes

    while queue:
        current = queue.popleft()            # Dequeue a vertex from the queue
        if current not in visited:
            visited.add(current)             # Mark the node as visited
            visit_order.append(current)      # Add it to the visit order
            # Enqueue all unvisited neighbors
            for neighbor in graph[current]:
                if neighbor not in visited:
                    queue.append(neighbor)

    return visit_order

# Example usage
if __name__ == "__main__":
    # Define a graph as an adjacency list
    graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D', 'E'],
        'C': ['A', 'F'],
        'D': ['B'],
        'E': ['B', 'F'],
        'F': ['C', 'E']
    }

    start_node = 'A'
    result = bfs(graph, start_node)
    print("BFS traversal starting from node '{}': {}".format(start_node, result))

output:-

BFS traversal starting from node 'A': ['A', 'B', 'C', 'D', 'E', 'F']
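Because BFS expands nodes level by level, the same loop can be adapted to compute shortest path lengths (in edges) in an unweighted graph. A minimal sketch under that assumption; the bfs_distances helper and its distance dict are illustrative, not part of the practical:

from collections import deque

def bfs_distances(graph, start):
    # Hypothetical helper: hop counts from start in an unweighted graph
    distance = {start: 0}
    queue = deque([start])
    while queue:
        current = queue.popleft()
        for neighbor in graph[current]:
            if neighbor not in distance:  # the dict doubles as the visited set
                distance[neighbor] = distance[current] + 1
                queue.append(neighbor)
    return distance

# On the example graph above this gives:
# {'A': 0, 'B': 1, 'C': 1, 'D': 2, 'E': 2, 'F': 2}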
Program:-

def dfs(graph, start, visited=None, visit_order=None):
    if visited is None:
        visited = set()    # Set to keep track of visited nodes
    if visit_order is None:
        visit_order = []   # List to record the order of visited nodes

    visited.add(start)         # Mark the node as visited
    visit_order.append(start)  # Add it to the visit order

    # Recur for all the vertices adjacent to this vertex
    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited, visit_order)

    return visit_order

# Example usage
if __name__ == "__main__":
    # Define a graph as an adjacency list
    graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D', 'E'],
        'C': ['A', 'F'],
        'D': ['B'],
        'E': ['B', 'F'],
        'F': ['C', 'E']
    }

    start_node = 'A'
    result = dfs(graph, start_node)
    print("DFS traversal starting from node '{}': {}".format(start_node, result))

output:-

DFS traversal starting from node 'A': ['A', 'B', 'D', 'E', 'F', 'C']
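The recursive version relies on Python's call stack, which is capped at roughly 1000 frames by default, so a very deep graph can raise RecursionError. A minimal iterative sketch with an explicit stack, assuming the same adjacency-list format; the dfs_iterative name is illustrative:

def dfs_iterative(graph, start):
    # Explicit stack instead of recursion
    visited, visit_order, stack = set(), [], [start]
    while stack:
        current = stack.pop()
        if current not in visited:
            visited.add(current)
            visit_order.append(current)
            # Push neighbors in reverse so they are visited in listed order
            for neighbor in reversed(graph[current]):
                if neighbor not in visited:
                    stack.append(neighbor)
    return visit_order

On the example graph this produces the same order as the recursive version, ['A', 'B', 'D', 'E', 'F', 'C'], though in general the two variants may visit nodes in different orders.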
Program:-

import heapq

class Node:
    def __init__(self, name, heuristic):
        self.name = name            # Node name
        self.heuristic = heuristic  # Heuristic value
        self.parent = None          # Parent node

    def __lt__(self, other):
        return self.heuristic < other.heuristic  # For priority queue ordering

def greedy_best_first_search(graph, heuristics, start, goal):
    # Priority queue ordered by heuristic value
    priority_queue = []
    # Set to keep track of visited nodes
    visited = set()

    # Initialize the start node
    start_node = Node(start, heuristics[start])
    heapq.heappush(priority_queue, start_node)

    while priority_queue:
        # Get the node with the lowest heuristic value
        current_node = heapq.heappop(priority_queue)

        # If we reached the goal, reconstruct the path
        if current_node.name == goal:
            path = []
            while current_node:
                path.append(current_node.name)
                current_node = current_node.parent
            return path[::-1]  # Return reversed path

        visited.add(current_node.name)

        # Explore neighbors
        for neighbor in graph[current_node.name]:
            if neighbor not in visited:
                neighbor_node = Node(neighbor, heuristics[neighbor])
                neighbor_node.parent = current_node  # For path reconstruction
                heapq.heappush(priority_queue, neighbor_node)

    return None  # Return None if the goal is not reachable

# Example usage
if __name__ == "__main__":
    # Define a graph as an adjacency list
    graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D', 'E'],
        'C': ['A', 'F'],
        'D': ['B'],
        'E': ['B', 'F'],
        'F': ['C', 'E']
    }

    # Define heuristic values for each node (example values)
    heuristics = {
        'A': 6,
        'B': 4,
        'C': 2,
        'D': 1,
        'E': 3,
        'F': 0  # Goal node
    }

    start_node = 'A'
    goal_node = 'F'
    result = greedy_best_first_search(graph, heuristics, start_node, goal_node)
    print("Greedy Best-First Search path from '{}' to '{}': {}".format(start_node, goal_node, result))

output:-

Greedy Best-First Search path from 'A' to 'F': ['A', 'C', 'F']
Program:-

import heapq

class Node:
    def __init__(self, name, g, h):
        self.name = name    # Node name
        self.g = g          # Cost from start to this node
        self.h = h          # Heuristic cost from this node to goal
        self.f = g + h      # Total estimated cost
        self.parent = None  # Parent node for path reconstruction

    def __lt__(self, other):
        return self.f < other.f  # For priority queue ordering

def a_star_search(graph, heuristics, start, goal):
    # Priority queue ordered by f = g + h
    priority_queue = []
    # Set to keep track of visited nodes
    visited = set()

    # Initialize the start node
    start_node = Node(start, 0, heuristics[start])
    heapq.heappush(priority_queue, start_node)

    while priority_queue:
        # Get the node with the lowest f value
        current_node = heapq.heappop(priority_queue)

        # If we reached the goal, reconstruct the path
        if current_node.name == goal:
            path = []
            while current_node:
                path.append(current_node.name)
                current_node = current_node.parent
            return path[::-1]  # Return reversed path

        visited.add(current_node.name)

        # Explore neighbors
        for neighbor, cost in graph[current_node.name].items():
            if neighbor not in visited:
                g_cost = current_node.g + cost  # Cost from start to neighbor
                h_cost = heuristics[neighbor]   # Heuristic cost to goal
                neighbor_node = Node(neighbor, g_cost, h_cost)
                neighbor_node.parent = current_node  # For path reconstruction
                heapq.heappush(priority_queue, neighbor_node)

    return None  # Return None if the goal is not reachable

# Example usage
if __name__ == "__main__":
    # Define a graph as an adjacency list with edge costs
    graph = {
        'A': {'B': 1, 'C': 4},
        'B': {'A': 1, 'D': 2, 'E': 5},
        'C': {'A': 4, 'F': 3},
        'D': {'B': 2},
        'E': {'B': 5, 'F': 1},
        'F': {'C': 3, 'E': 1}
    }

    # Define heuristic values for each node (example values)
    heuristics = {
        'A': 7,
        'B': 6,
        'C': 2,
        'D': 1,
        'E': 3,
        'F': 0  # Goal node
    }

    start_node = 'A'
    goal_node = 'F'
    result = a_star_search(graph, heuristics, start_node, goal_node)
    print("A* Search path from '{}' to '{}': {}".format(start_node, goal_node, result))

output:-
A* Search path from 'A' to 'F': ['A', 'C', 'F']
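As a quick check on this output: the returned path A to C to F costs 4 + 3 = 7, the same as the alternative A to B to E to F (1 + 5 + 1 = 7), so both are optimal. A* returns the first one here because C is expanded earlier, its f-value being 4 + 2 = 6 against B's 1 + 6 = 7.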
Program:-

# Importing necessary libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# Load the dataset
# Replace the path with the location of your dataset file
df = pd.read_csv('/content/drive/MyDrive/creditcard.csv')

# Explore the dataset (optional: just for better understanding)
print(df.head())

# Preprocessing
# 'Class' is the target variable, where 1 indicates fraud and 0 indicates non-fraud
X = df.drop(columns=['Class'])  # Features
y = df['Class']                 # Target

# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)

# Initialize the RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, random_state=42)

# Train the classifier
clf.fit(X_train, y_train)

# Make predictions
y_pred = clf.predict(X_test)

# Evaluate the model
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Classification Report:\n", classification_report(y_test, y_pred))
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

output:-

   Time        V1        V2        V3        V4        V5        V6        V7  \
0   0.0 -1.359807 -0.072781  2.536347  1.378155 -0.338321  0.462388  0.239599
1   0.0  1.191857  0.266151  0.166480  0.448154  0.060018 -0.082361 -0.078803
2   1.0 -1.358354 -1.340163  1.773209  0.379780 -0.503198  1.800499  0.791461
3   1.0 -0.966272 -0.185226  1.792993 -0.863291 -0.010309  1.247203  0.237609
4   2.0 -1.158233  0.877737  1.548718  0.403034 -0.407193  0.095921  0.592941

         V8        V9  ...       V21       V22       V23       V24       V25  \
0  0.098698  0.363787  ... -0.018307  0.277838 -0.110474  0.066928  0.128539
1  0.085102 -0.255425  ... -0.225775 -0.638672  0.101288 -0.339846  0.167170
2  0.247676 -1.514654  ...  0.247998  0.771679  0.909412 -0.689281 -0.327642
3  0.377436 -1.387024  ... -0.108300  0.005274 -0.190321 -1.175575  0.647376
4 -0.270533  0.817739  ... -0.009431  0.798278 -0.137458  0.141267 -0.206010

        V26       V27       V28  Amount  Class
0 -0.189115  0.133558 -0.021053  149.62      0
1  0.125895 -0.008983  0.014724    2.69      0
2 -0.139097 -0.055353 -0.059752  378.66      0
3 -0.221929  0.062723  0.061458  123.50      0
4  0.502292  0.219422  0.215153   69.99      0

[5 rows x 31 columns]
Accuracy: 0.9995201479348805
Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00     85295
           1       0.96      0.76      0.85       148

    accuracy                           1.00     85443
   macro avg       0.98      0.88      0.92     85443
weighted avg       1.00      1.00      1.00     85443

Confusion Matrix:
 [[85290     5]
 [   36   112]]
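With only 148 fraud cases among 85,443 test rows, the 99.95% accuracy is dominated by the majority class; the recall of 0.76 on class 1 (36 frauds missed in the confusion matrix) is the more informative number. One common adjustment is to reweight the classes during training; a minimal sketch, assuming the same X_train and y_train as above:

# class_weight='balanced' weights samples inversely to class frequency,
# which typically trades some precision for higher fraud recall
clf_balanced = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=42)
clf_balanced.fit(X_train, y_train)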
Program:-

from sklearn.model_selection import train_test_split

# Sample data
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']

# Split data into training and testing sets (80% train, 20% test)
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)

# Print the split datasets
print("Training Data:", X_train)
print("Training Labels:", y_train)
print("Testing Data:", X_test)
print("Testing Labels:", y_test)

output:-

The program prints the four resulting splits: eight training samples with their labels and two testing samples with theirs. The partition is shuffled but reproducible, since random_state=42 fixes the shuffle.


Program:-

import matplotlib.pyplot as plt
import seaborn as sns

# Sample accuracy and precision scores for different classifiers
classifiers = ['SVC', 'Random Forest', 'Naive Bayes']
accuracies = [0.95, 0.92, 0.90]         # Example accuracy scores
precision_scores = [0.94, 0.91, 0.89]   # Example precision scores

# Create subplots for accuracy and precision comparison
fig, axes = plt.subplots(1, 2, figsize=(15, 5))

# Plot bar graph for accuracies
axes[0].bar(classifiers, accuracies, color='skyblue')
axes[0].set_xlabel('Classifier')
axes[0].set_ylabel('Accuracy')
axes[0].set_title('Accuracy Comparison of Different Classifiers')
axes[0].set_ylim(0, 1)

# Plot bar graph for precision scores
axes[1].bar(classifiers, precision_scores, color='lightgreen')
axes[1].set_xlabel('Classifier')
axes[1].set_ylabel('Precision Score')
axes[1].set_title('Precision Score Comparison of Different Classifiers')
axes[1].set_ylim(0, 1)

plt.tight_layout()
plt.show()

output:-
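The output here is the figure itself: two side-by-side bar charts, accuracy on the left and precision on the right, for the three classifiers. When the script runs without a display (for example, on a headless server), plt.show() may not render anything; saving the figure is an alternative. A one-line sketch, with an illustrative filename:

fig.savefig('classifier_comparison.png', dpi=150)  # writes the figure to disk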
Program:-

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Sample dataset (you can replace this with your own):
# house size (in square feet) and price (in thousands of dollars)
house_size = np.array([1000, 1500, 1800, 2200, 2500, 3000]).reshape(-1, 1)
house_price = np.array([250, 350, 400, 500, 600, 700])

# Create a linear regression model
model = LinearRegression()

# Train the model
model.fit(house_size, house_price)

# Make predictions
price_pred = model.predict(house_size)

# Print the coefficients
print('Coefficients:', model.coef_)
print('Intercept:', model.intercept_)

# Evaluate the model
print('Mean squared error (MSE): %.2f' % mean_squared_error(house_price, price_pred))
print('Coefficient of determination (R^2): %.2f' % r2_score(house_price, price_pred))

# Plot the results
plt.scatter(house_size, house_price, color='blue')
plt.plot(house_size, price_pred, color='red', linewidth=2)
plt.xlabel('House Size (sq ft)')
plt.ylabel('House Price (thousands of $)')
plt.title('Simple Linear Regression')
plt.show()

output:-

Coefficients: [0.23062016]

Intercept: 5.426356589147133

Mean squared error (MSE): 185.72

Coefficient of determination (R^2): 0.99
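Reading the coefficients: each additional square foot adds about 0.23 thousand dollars (roughly $231), so a hypothetical 2,000 sq ft house would be priced at about 0.2306 x 2000 + 5.43, around 466.7 thousand dollars. The near-perfect R^2 of 0.99 reflects how closely the six sample points follow a straight line.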


Program:-

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score

# This dataset represents factors affecting student test scores
data = {
    'study_hours': [2, 3, 5, 7, 8, 4, 6, 2, 9, 5],
    'sleep_hours': [8, 7, 6, 8, 7, 9, 7, 6, 8, 9],
    'previous_score': [70, 75, 80, 85, 90, 72, 88, 65, 92, 78],
    'test_score': [75, 80, 85, 90, 95, 78, 92, 70, 98, 83]
}
df = pd.DataFrame(data)

# Separate features (X) and target variable (y)
X = df[['study_hours', 'sleep_hours', 'previous_score']]
y = df['test_score']

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create and train the model
model = LinearRegression()
model.fit(X_train, y_train)

# Make predictions
y_pred = model.predict(X_test)

# Print the coefficients
print('Coefficients:', model.coef_)
print('Intercept:', model.intercept_)

# Evaluate the model
print('Mean squared error (MSE): %.2f' % mean_squared_error(y_test, y_pred))
print('Coefficient of determination (R^2): %.2f' % r2_score(y_test, y_pred))

# Predict the score for a new student
new_student = [[8, 5, 92]]
predicted_score = model.predict(new_student)
print('Predicted score for new student:', predicted_score[0])
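One caveat: because the model was fitted on a DataFrame with named columns, recent versions of scikit-learn warn when predict is given a plain list. Passing the new sample as a DataFrame with the same column names avoids the warning; a minimal sketch:

# Same feature columns, in the same order, as the training data
new_student = pd.DataFrame([[8, 5, 92]],
                           columns=['study_hours', 'sleep_hours', 'previous_score'])
predicted_score = model.predict(new_student)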
