Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 83 additions & 0 deletions lib/features/transcription/data/deepgram_service.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import 'dart:convert';
import 'dart:io';
import 'dart:async';
import 'package:flutter_dotenv/flutter_dotenv.dart';
import 'package:http/http.dart' as http;

/// Thin client for Deepgram's pre-recorded audio transcription REST API.
///
/// The API key is resolved at call time: an explicitly injected key wins,
/// otherwise `DEEPGRAM_API_KEY` is read from the loaded `.env` file.
class DeepgramService {
  /// Optional key injected at construction; overrides the environment.
  final String? _apiKey;

  DeepgramService({String? apiKey}) : _apiKey = apiKey?.trim();

  /// Returns the API key to use, or an empty string when none is configured.
  String _resolveApiKey() {
    final configuredKey = _apiKey;
    if (configuredKey != null && configuredKey.isNotEmpty) {
      return configuredKey;
    }

    // dotenv throws NotInitializedError if dotenv.load() was never called;
    // treat that the same as "no key configured" rather than crashing.
    try {
      return (dotenv.env['DEEPGRAM_API_KEY'] ?? '').trim();
    } catch (_) {
      return '';
    }
  }

  /// Transcribes the audio file at [recordingPath] via Deepgram's nova-2 model.
  ///
  /// Returns the trimmed transcript, or the sentinel string
  /// `'No speech detected'` when the response contains no usable transcript.
  ///
  /// Throws [Exception] when the key is missing, the file does not exist,
  /// the request times out (30 s), or Deepgram responds with a non-200 status.
  Future<String> transcribe(String recordingPath) async {
    final apiKey = _resolveApiKey();
    if (apiKey.isEmpty) {
      throw Exception('Missing DEEPGRAM_API_KEY in environment');
    }

    final uri = Uri.parse('https://api.deepgram.com/v1/listen?model=nova-2');

    final file = File(recordingPath);
    if (!await file.exists()) {
      throw Exception('Recording file not found');
    }

    final bytes = await file.readAsBytes();

    http.Response response;
    try {
      response = await http.post(
        uri,
        headers: {
          'Authorization': 'Token $apiKey',
          // NOTE(review): 'audio/m4a' is not a registered MIME type (the
          // standard one is 'audio/mp4'); Deepgram appears to accept it, but
          // confirm against their supported-formats docs before changing.
          'Content-Type': 'audio/m4a',
        },
        body: bytes,
      ).timeout(const Duration(seconds: 30));
    } on TimeoutException {
      throw Exception('Deepgram request timed out after 30 seconds');
    }

    if (response.statusCode != 200) {
      // Include a truncated body so API errors (bad key, quota, bad audio)
      // are diagnosable from the exception message alone.
      final body = response.body;
      final snippet = body.length > 200 ? body.substring(0, 200) : body;
      throw Exception('Deepgram failed: ${response.statusCode} $snippet');
    }

    return _extractTranscript(json.decode(response.body));
  }

  /// Walks the Deepgram response shape
  /// `results.channels[0].alternatives[0].transcript` defensively.
  ///
  /// Returns `'No speech detected'` for any missing/malformed intermediate
  /// node or an empty transcript; throws only when the top-level JSON is not
  /// an object at all.
  String _extractTranscript(dynamic decoded) {
    if (decoded is! Map<String, dynamic>) {
      throw Exception('Deepgram returned unexpected response format');
    }

    const noSpeech = 'No speech detected';

    final results = decoded['results'];
    if (results is! Map<String, dynamic>) {
      return noSpeech;
    }

    final channels = results['channels'];
    if (channels is! List ||
        channels.isEmpty ||
        channels.first is! Map<String, dynamic>) {
      return noSpeech;
    }

    final alternatives =
        (channels.first as Map<String, dynamic>)['alternatives'];
    if (alternatives is! List ||
        alternatives.isEmpty ||
        alternatives.first is! Map<String, dynamic>) {
      return noSpeech;
    }

    final transcript =
        (alternatives.first as Map<String, dynamic>)['transcript'];
    final result = transcript is String ? transcript.trim() : '';
    return result.isNotEmpty ? result : noSpeech;
  }
}
18 changes: 18 additions & 0 deletions lib/features/transcription/data/gemini_service.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import 'package:doc_pilot_new_app_gradel_fix/services/chatbot_service.dart';

/// Wraps the shared [ChatbotService] to turn a raw transcription into
/// doctor-facing artifacts (summary, prescription) via Gemini prompts.
class GeminiService {
  final ChatbotService _chatbotService = ChatbotService();

  /// Returns a Gemini-generated summary of the conversation in [transcription].
  Future<String> generateSummary(String transcription) async {
    return await _chatbotService.getGeminiResponse(
      "Generate a summary of the conversation based on this transcription: $transcription",
    );
  }

  /// Returns a Gemini-generated prescription based on [transcription].
  ///
  /// The previous implementation slept for a hard-coded 3 seconds before
  /// calling the API — leftover demo latency with no functional purpose —
  /// which has been removed.
  Future<String> generatePrescription(String transcription) async {
    return await _chatbotService.getGeminiResponse(
      "Generate a prescription based on the conversation in this transcription: $transcription",
    );
  }
}
23 changes: 23 additions & 0 deletions lib/features/transcription/domain/transcription_model.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
/// Immutable value object holding the three artifacts of one transcription
/// session: the raw Deepgram transcript plus the Gemini-derived summary and
/// prescription. All fields default to the empty string.
class TranscriptionModel {
  final String rawTranscript;
  final String summary;
  final String prescription;

  const TranscriptionModel({
    this.rawTranscript = '',
    this.summary = '',
    this.prescription = '',
  });

  /// Returns a copy with any non-null argument replacing the current value.
  TranscriptionModel copyWith({
    String? rawTranscript,
    String? summary,
    String? prescription,
  }) =>
      TranscriptionModel(
        rawTranscript: rawTranscript ?? this.rawTranscript,
        summary: summary ?? this.summary,
        prescription: prescription ?? this.prescription,
      );
}
173 changes: 173 additions & 0 deletions lib/features/transcription/presentation/transcription_controller.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
import 'dart:developer' as developer;
import 'dart:math';
import 'dart:async';
import 'package:flutter/foundation.dart';
import 'package:path_provider/path_provider.dart';
import 'package:record/record.dart';
import 'package:permission_handler/permission_handler.dart';
import '../data/deepgram_service.dart';
import '../data/gemini_service.dart';
import '../domain/transcription_model.dart';

/// Lifecycle of one recording/transcription session, in rough flow order:
/// idle → recording → transcribing (Deepgram) → processing (Gemini) → done,
/// with `error` reachable from any stage.
enum TranscriptionState { idle, recording, transcribing, processing, done, error }

/// Drives the record → transcribe (Deepgram) → summarize/prescribe (Gemini)
/// pipeline and exposes its state to the UI via [ChangeNotifier].
class TranscriptionController extends ChangeNotifier {
  final _audioRecorder = AudioRecorder();
  final _deepgramService = DeepgramService();
  final _geminiService = GeminiService();
  // Single RNG reused by the waveform timer instead of constructing a new
  // Random per bar per tick (40 allocations every 100 ms previously).
  final _random = Random();

  TranscriptionState state = TranscriptionState.idle;
  TranscriptionModel data = const TranscriptionModel();
  String? errorMessage;
  String _recordingPath = '';

  // Waveform — kept here since it's driven by recording state.
  // Values are random in [0, 1); purely decorative, not real audio levels.
  final List<double> waveformValues = List.filled(40, 0.0);
  Timer? _waveformTimer;

  /// Whether a recording is currently in progress.
  bool get isRecording => state == TranscriptionState.recording;

  /// Whether either backend stage (Deepgram or Gemini) is running.
  bool get isProcessing =>
      state == TranscriptionState.transcribing ||
      state == TranscriptionState.processing;

  String get transcription => data.rawTranscript;
  String get summary => data.summary;
  String get prescription => data.prescription;

  /// Requests microphone permission.
  ///
  /// Returns true when granted; otherwise sets an error state (with a
  /// settings hint when the denial is permanent) and returns false.
  Future<bool> requestPermissions() async {
    final status = await Permission.microphone.request();

    if (status.isGranted) {
      return true;
    }

    if (status.isPermanentlyDenied) {
      _setError(
          'Microphone permission permanently denied. Please enable it in settings.');
      return false;
    }

    _setError('Microphone permission denied');
    return false;
  }

  /// Starts a recording if idle, stops (and kicks off transcription) if
  /// currently recording.
  Future<void> toggleRecording() async {
    if (isRecording) {
      await _stopRecording();
    } else {
      await _startRecording();
    }
  }

  /// Begins capturing audio to a temp .m4a file and starts the waveform.
  Future<void> _startRecording() async {
    try {
      if (!await _audioRecorder.hasPermission()) {
        final granted = await requestPermissions();
        if (!granted) {
          return;
        }
      }

      final directory = await getTemporaryDirectory();
      _recordingPath =
          '${directory.path}/recording_${DateTime.now().millisecondsSinceEpoch}.m4a';

      await _audioRecorder.start(
        // All fields are compile-time constants, so the config can be const.
        const RecordConfig(
          encoder: AudioEncoder.aacLc,
          bitRate: 128000,
          sampleRate: 44100,
        ),
        path: _recordingPath,
      );

      // Reset previous session's results before starting a new one.
      data = const TranscriptionModel();
      state = TranscriptionState.recording;
      _startWaveformAnimation();
      notifyListeners();

      developer.log('Started recording to: $_recordingPath');
    } catch (e) {
      _setError('Error starting recording: $e');
    }
  }

  /// Stops the recorder and hands the captured file to Deepgram.
  Future<void> _stopRecording() async {
    try {
      _waveformTimer?.cancel();
      _waveformTimer = null;
      _resetWaveform();

      await _audioRecorder.stop();
      state = TranscriptionState.transcribing;
      notifyListeners();

      developer.log('Recording stopped, transcribing...');
      await _transcribe();
    } catch (e) {
      _setError('Error stopping recording: $e');
    }
  }

  /// Transcribes the last recording; on a usable transcript, continues to
  /// the Gemini stage, otherwise finishes with just the transcript.
  Future<void> _transcribe() async {
    try {
      final transcript = await _deepgramService.transcribe(_recordingPath);

      data = data.copyWith(rawTranscript: transcript);
      state = TranscriptionState.processing;
      notifyListeners();

      // 'No speech detected' is DeepgramService's sentinel for an empty
      // transcript — skip Gemini in that case.
      if (transcript.isNotEmpty && transcript != 'No speech detected') {
        await _processWithGemini(transcript);
      } else {
        state = TranscriptionState.done;
        notifyListeners();
      }
    } catch (e) {
      _setError('Transcription error: $e');
    }
  }

  /// Generates summary and prescription from [transcript].
  ///
  /// The two Gemini calls are independent, so they run concurrently via
  /// [Future.wait] instead of back-to-back awaits.
  Future<void> _processWithGemini(String transcript) async {
    try {
      final results = await Future.wait([
        _geminiService.generateSummary(transcript),
        _geminiService.generatePrescription(transcript),
      ]);

      data = data.copyWith(summary: results[0], prescription: results[1]);
      state = TranscriptionState.done;
      notifyListeners();

      developer.log('Gemini processing complete');
    } catch (e) {
      _setError('Gemini error: $e');
    }
  }

  /// Animates the decorative waveform with random bar heights at 10 Hz.
  void _startWaveformAnimation() {
    _waveformTimer = Timer.periodic(const Duration(milliseconds: 100), (_) {
      for (int i = 0; i < waveformValues.length; i++) {
        waveformValues[i] = _random.nextDouble();
      }
      notifyListeners();
    });
  }

  /// Zeroes all waveform bars; callers are responsible for notifying.
  void _resetWaveform() {
    for (int i = 0; i < waveformValues.length; i++) {
      waveformValues[i] = 0.0;
    }
  }

  /// Records [message], moves to the error state, and notifies listeners.
  void _setError(String message) {
    errorMessage = message;
    state = TranscriptionState.error;
    notifyListeners();
    developer.log(message);
  }

  @override
  void dispose() {
    _waveformTimer?.cancel();
    _audioRecorder.dispose();
    super.dispose();
  }
}
Loading
Loading