```python
from collections import Counter

def count_word_frequency(file_path, top_n):
    try:
        with open(file_path, encoding='utf-8') as f:
            words = f.read().lower().split()
        # Strip punctuation, then drop tokens that were punctuation-only
        words = [''.join(c for c in word if c.isalnum()) for word in words]
        words = [word for word in words if word]
        print(Counter(words).most_common(top_n))
    except Exception as e:
        print(f"Error: {e}")

# Usage
count_word_frequency("example.txt", 10)
```
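The function prints the `top_n` most frequent words as `(word, count)` pairs. A quick self-contained check of the snippet above, where the file name and contents are illustrative assumptions rather than part of the original post:

```python
# Illustrative input file (contents assumed, not from the original post)
with open("example.txt", "w", encoding="utf-8") as f:
    f.write("the cat sat on the mat. The mat was flat.")

count_word_frequency("example.txt", 3)
# Expected output: [('the', 3), ('mat', 2), ('cat', 1)]
```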
```matlab
clc; clear; close all;

dataLength = 16;   % Length of the signal
snr = 10;          % Signal-to-noise ratio in dB

signal = randn(1, dataLength);   % Random signal (e.g., Gaussian)
signalPower = mean(signal.^2);
snrLinear = 10^(snr / 10);       % Convert dB to a linear power ratio
noise = sqrt(signalPower / snrLinear) * randn(size(signal));
noisySignal = signal + noise;

figure;
subplot(2, 1, 1);
plot(signal);
title('Original Signal');
xlabel('Samples'); ylabel('Amplitude');

subplot(2, 1, 2);
plot(noisySignal);
title('Signal with Noise');
xlabel('Samples'); ylabel('Amplitude');
```
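The scaling factor follows from the definition of SNR: with the target SNR in dB, `snrLinear = 10^(snr/10)` equals signal power divided by noise power, so the noise standard deviation must be `sqrt(signalPower / snrLinear)`. A NumPy sketch that re-creates the scaling and checks the empirical SNR (this Python translation and its variable names are my own, not from the post):

```python
import numpy as np

rng = np.random.default_rng(0)
data_length = 16
snr_db = 10

signal = rng.standard_normal(data_length)
signal_power = np.mean(signal**2)
snr_linear = 10 ** (snr_db / 10)  # 10 dB -> power ratio of 10
noise = np.sqrt(signal_power / snr_linear) * rng.standard_normal(data_length)

# The measured SNR should land near 10 dB, up to sampling noise
empirical_snr_db = 10 * np.log10(signal_power / np.mean(noise**2))
print(f"Empirical SNR: {empirical_snr_db:.1f} dB")
```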
POS Tagging using Seq2Seq
```python
# Imports
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split

# Example Data (Replace with your dataset)
sentences = ["I love programming", "You are learning", "We enjoy coding"]
tags = [["PRON", "VERB", "NOUN"],
        ["PRON", "VERB", "VERB"],
        ["PRON", "VERB", "NOUN"]]

# Preprocessing: integer-encode the words and the tags separately
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
input_sequences = tokenizer.texts_to_sequences(sentences)

tag_tokenizer = Tokenizer()
tag_tokenizer.fit_on_texts(tags)
tag_index = tag_tokenizer.word_index
output_sequences = tag_tokenizer.texts_to_sequences(tags)

# Padding
max_seq_length = max(len(seq) for seq in input_sequences)
```
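The archive preview cuts the post off at the padding step. A minimal sketch of how such a tagger is typically finished, continuing from the variables above and assuming an Embedding-plus-LSTM encoder with a per-timestep softmax; the layer sizes, split ratio, and training settings here are illustrative assumptions, not the original post's values:

```python
from tensorflow.keras.utils import to_categorical

# Pad inputs and outputs to the same fixed length
X = pad_sequences(input_sequences, maxlen=max_seq_length, padding='post')
y = pad_sequences(output_sequences, maxlen=max_seq_length, padding='post')
y = to_categorical(y, num_classes=len(tag_index) + 1)  # one-hot per timestep

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

# Model: Embedding -> LSTM (one output vector per timestep) -> Dense softmax
inputs = Input(shape=(max_seq_length,))
x = Embedding(input_dim=len(word_index) + 1, output_dim=64)(inputs)
x = LSTM(64, return_sequences=True)(x)
outputs = Dense(len(tag_index) + 1, activation='softmax')(x)

model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=2,
          validation_data=(X_test, y_test))
```

Setting `return_sequences=True` is what makes this a sequence-to-sequence tagger: the LSTM emits one hidden state per input token, and the Dense layer turns each into a distribution over tags, so input and output sequences stay aligned position by position.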