import os

from docx import Document
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def read_file(file_path):
    # Read the full text of a .docx or .txt file.
    ext = os.path.splitext(file_path)[1].lower()
    if ext == '.docx':
        doc = Document(file_path)
        return '\n'.join(para.text for para in doc.paragraphs)
    elif ext == '.txt':
        with open(file_path, encoding='utf-8') as f:
            return f.read()
    else:
        raise ValueError("Unsupported file format: only .docx or .txt")

def get_ngrams(text, n=3):
    # Split the text into lowercase word n-grams (overlapping windows of n words).
    words = text.lower().split()
    return [' '.join(words[i:i+n]) for i in range(len(words) - n + 1)]
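# Illustrative example (input not from the original): get_ngrams("The quick brown fox", n=3)
# returns ['the quick brown', 'quick brown fox'].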

def vectorize_ngrams(ngrams1, ngrams2):
    # Count each n-gram as a single token: a custom analyzer bypasses the default
    # word tokenizer, so similarity is measured over n-grams rather than individual
    # words (re-joining the n-grams with spaces would reduce this to word counts).
    vectorizer = CountVectorizer(analyzer=lambda tokens: tokens)
    return vectorizer.fit_transform([ngrams1, ngrams2])

def check_similarity_from_files(file1, file2, n=3, threshold=0.5):
    text1 = read_file(file1)
    text2 = read_file(file2)

    ngrams1 = get_ngrams(text1, n)
    ngrams2 = get_ngrams(text2, n)
    if not ngrams1 or not ngrams2:
        raise ValueError(f"Both texts must contain at least {n} words.")

    vectors = vectorize_ngrams(ngrams1, ngrams2)
    similarity = cosine_similarity(vectors[0], vectors[1])[0][0]

    print(f"Similarity: {similarity:.2%}")
    if similarity > threshold:
        print("⚠️ Suspiciously high similarity: possible plagiarism!")
    else:
        print("✅ Similarity is acceptable.")
    return similarity

# Usage
check_similarity_from_files("azegyik.docx", "amasik.docx")
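# A second, illustrative call with hypothetical .txt files and stricter settings
# (the file names and parameter values below are assumptions, not from the original):
# check_similarity_from_files("draft.txt", "reference.txt", n=4, threshold=0.3)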
