Posts

import heapq

def dijkstra(graph, start, end):
    # Priority queue: (cost, current_node)
    queue = [(0, start)]
    distances = {node: float('inf') for node in graph}
    distances[start] = 0
    predecessors = {node: None for node in graph}
    while queue:
        current_cost, current_node = heapq.heappop(queue)
        if current_node == end:
            break
        for neighbor, cost in graph[current_node].items():
            new_cost = current_cost + cost
            if new_cost < distances[neighbor]:
                distances[neighbor] = new_cost
                predecessors[neighbor] = current_node
                heapq.heappush(queue, (new_cost, neighbor))
    # Walk the predecessor chain back from `end` to recover the path
    path = []
    node = end
    while node is not None:
        path.append(node)
        node = predecessors[node]
    return path[::-1], distances[end]
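A quick usage sketch; the graph below is hypothetical, just to exercise the function:

# Hypothetical graph: graph[node][neighbor] = edge cost
graph = {
    'A': {'B': 1, 'C': 4},
    'B': {'A': 1, 'C': 2, 'D': 5},
    'C': {'A': 4, 'B': 2, 'D': 1},
    'D': {'B': 5, 'C': 1},
}
print(dijkstra(graph, 'A', 'D'))  # (['A', 'B', 'C', 'D'], 4)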
from collections import Counter

def count_word_frequency(file_path, top_n):
    try:
        # Read the file, lowercase it, and split on whitespace
        with open(file_path, encoding='utf-8') as f:
            words = f.read().lower().split()
        # Strip punctuation, keeping only alphanumeric characters
        words = [''.join(c for c in word if c.isalnum()) for word in words]
        print(Counter(words).most_common(top_n))
    except Exception as e:
        print(f"Error: {e}")

# Usage
count_word_frequency("example.txt", 10)
clc; clear; close all;

dataLength = 16;                % Length of the signal
snr = 10;                       % Signal-to-noise ratio in dB

signal = randn(1, dataLength);  % Random signal (e.g., Gaussian)
signalPower = mean(signal.^2);
snrLinear = 10^(snr / 10);
noise = sqrt(signalPower / snrLinear) * randn(size(signal));
noisySignal = signal + noise;

figure;
subplot(2, 1, 1);
plot(signal);
title('Original Signal');
xlabel('Samples'); ylabel('Amplitude');

subplot(2, 1, 2);
plot(noisySignal);
title('Signal with Noise');
xlabel('Samples'); ylabel('Amplitude');

POS Tagging using Seq2Seq

# Imports
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split

# Example Data (Replace with your dataset)
sentences = ["I love programming", "You are learning", "We enjoy coding"]
tags = [["PRON", "VERB", "NOUN"], ["PRON", "VERB", "VERB"], ["PRON", "VERB", "NOUN"]]

# Preprocessing: integer-encode the words and the tags
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
input_sequences = tokenizer.texts_to_sequences(sentences)

tag_tokenizer = Tokenizer()
tag_tokenizer.fit_on_texts(tags)
tag_index = tag_tokenizer.word_index
output_sequences = tag_tokenizer.texts_to_sequences(tags)

# Padding
max_seq_length = max(len(seq) for seq in input_sequences)
input_sequences = pad_sequences(input_sequences, maxlen=max_seq_length, padding='post')
output_sequences = pad_sequences(output_sequences, maxlen=max_seq_length, padding='post')
# Sparse categorical loss expects targets of shape (batch, timesteps, 1)
output_sequences = np.expand_dims(output_sequences, -1)

# Minimal completion (assumed): train/test split, then an
# Embedding -> LSTM -> Dense tagger with one output per time step
X_train, X_test, y_train, y_test = train_test_split(
    input_sequences, output_sequences, test_size=0.2)

inputs = Input(shape=(max_seq_length,))
x = Embedding(input_dim=len(word_index) + 1, output_dim=64)(inputs)
x = LSTM(64, return_sequences=True)(x)
outputs = Dense(len(tag_index) + 1, activation='softmax')(x)

model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=2,
          validation_data=(X_test, y_test))
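Once trained, tagging a new sentence is the same encode-pad-predict sequence; a small sketch (the test sentence is made up):

# Hypothetical test sentence, reusing the tokenizers fitted above
test = tokenizer.texts_to_sequences(["You love coding"])
test = pad_sequences(test, maxlen=max_seq_length, padding='post')
pred = model.predict(test).argmax(axis=-1)       # most likely tag id per position
id_to_tag = {v: k for k, v in tag_index.items()}
print([id_to_tag.get(i, '-PAD-') for i in pred[0]])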

DPCO - 3-Bit Synchronous Counter

[Image: 3-bit synchronous counter circuit diagram]
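The circuit itself is only in the image, but the standard 3-bit synchronous up-counter uses T flip-flops with T0 = 1, T1 = Q0, T2 = Q0·Q1; a minimal Python sketch of that next-state logic:

# 3-bit synchronous up-counter built from T flip-flops.
# Standard excitation: T0 = 1, T1 = Q0, T2 = Q0 AND Q1.
def next_state(q2, q1, q0):
    t0, t1, t2 = 1, q0, q0 & q1        # T inputs from the current state
    return q2 ^ t2, q1 ^ t1, q0 ^ t0   # T flip-flop: Q(next) = Q XOR T

state = (0, 0, 0)
for _ in range(8):
    print(state)                       # counts 000, 001, ..., 111
    state = next_state(*state)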
 

New Memory-Bounded A* Algorithm

[Image: search graph for the A* example]
def astar(start, goal):
    opened = [[start, 0]]   # Open list of [node, cost-so-far] pairs
    closed = []             # Expansion order
    visited = set()         # Set of visited nodes
    while opened:
        # Expand the cheapest open node first
        opened.sort(key=lambda pair: pair[1])
        val, min_cost = opened.pop(0)
        if val == goal:
            closed.append(val)
            break
        if val in visited:
            continue
        visited.add(val)
        closed.append(val)
        for neighbor, cost in nodes[val]:
            if neighbor not in visited:
                new_cost = min_cost + cost
                opened.append([neighbor, new_cost])
    return closed
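The adjacency data is in the image above; as a stand-in, a small hypothetical `nodes` dict shows how the function is called:

# Hypothetical stand-in for the graph in the image
nodes = {
    'A': [['B', 1], ['C', 4]],
    'B': [['C', 2], ['D', 5]],
    'C': [['D', 1]],
    'D': [],
}
print(astar('A', 'D'))  # ['A', 'B', 'C', 'D']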

Memory-Bounded A* Algorithm

[Image: graph and heuristic values for the memory-bounded A* example]
nodes = {
    'A': [['B', 6], ['F', 3]],
    'B': [['A', 6], ['C', 3], ['D', 2]],
    'C': [['B', 3], ['D', 1], ['E', 5]],
    'D': [['B', 2], ['C', 1], ['E', 8]],
    'E': [['C', 5], ['D', 8], ['I', 5], ['J', 5]],
    'F': [['A', 3], ['G', 1], ['H', 7]],
    'G': [['F', 1], ['I', 3]],
    'H': [['F', 7], ['I', 2]],
    'I': [['G', 3], ['H', 2], ['E', 5], ['J', 3]],
    'J': [['E', 5], ['I', 3]],
}

# Heuristic: estimated distance from each node to the goal J
h = {'A': 10, 'B': 8, 'C': 5, 'D': 7, 'E': 3,
     'F': 6, 'G': 5, 'H': 3, 'I': 1, 'J': 0}

def astar(start, goal):
    opened = [[start, h[start]]]   # Open list of [node, f] with f = g + h
    closed = []
    visited = set()
    while opened:
        # Pick the open node with the smallest f-value
        min_f = 1000
        val = ''
        for node, f in opened:
            if f < min_f:
                min_f, val = f, node
        closed.append(val)
        visited.add(val)
        if goal in closed:
            break
        for neighbor, cost in nodes[val]:
            if neighbor not in visited:
                # g(val) = f(val) - h(val), so f(neighbor) = g(val) + cost + h(neighbor)
                opened.append([neighbor, min_f - h[val] + cost + h[neighbor]])
        # Drop the expanded node from the open list to keep it small
        opened.remove([val, min_f])
    # (the original post-processing of `closed` was cut off; the
    # expansion order is returned as-is)
    return closed

print(astar('A', 'J'))
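With this graph, astar('A', 'J') expands A, F, G, I, J in order, which here is also the cheapest A-to-J route (cost 3 + 1 + 3 + 3 = 10). Removing each expanded node from `opened` is what keeps the open list, and hence memory use, bounded.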