Danielfonseca1212 committed on
Commit
bc0f945
·
verified ·
1 Parent(s): 79ffe8b

Update app.py

Browse files
Files changed (1) — hide / show
  1. app.py +10 -24
app.py CHANGED
@@ -2,13 +2,12 @@
2
  import gradio as gr
3
  import os
4
  import httpx
5
- from huggingface_hub import list_models # ← CORREÇÃO: sem ModelFilter
6
  from datasets import load_dataset
7
 
8
  HF_TOKEN = os.getenv("HF_TOKEN")
9
 
10
  def search_models(query="legal", language="pt", limit=5):
11
- """Busca modelos no HF Hub"""
12
  try:
13
  results = list(
14
  list_models(
@@ -20,38 +19,30 @@ def search_models(query="legal", language="pt", limit=5):
20
  token=HF_TOKEN,
21
  )
22
  )
23
-
24
  output = ""
25
  for m in results[:int(limit)]:
26
  output += f"**{m.modelId}**\n"
27
  output += f"- Task: {m.pipeline_tag}\n"
28
  output += f"- Downloads: {m.downloads:,}\n"
29
  output += f"- URL: https://huggingface.co/{m.modelId}\n\n"
30
-
31
  return output if output else "Nenhum modelo encontrado."
32
  except Exception as e:
33
  return f"Erro: {str(e)}"
34
 
35
  def analyze_text(text, task="summarization"):
36
- """Analisa texto via Inference API"""
37
  try:
38
  models = {
39
  "summarization": "facebook/bart-large-cnn",
40
  "text-classification": "nlpaueb/legal-bert-base-uncased",
41
  }
42
-
43
  model_id = models.get(task, "facebook/bart-large-cnn")
44
  url = f"https://api-inference.huggingface.co/models/{model_id}"
45
  headers = {"Content-Type": "application/json"}
46
-
47
  if HF_TOKEN:
48
- headers["Authorization"] = f"Bearer {os.getenv('HF_TOKEN')}"
49
-
50
  payload = {"inputs": text[:512]}
51
-
52
  with httpx.Client(timeout=60.0) as client:
53
  resp = client.post(url, headers=headers, json=payload)
54
-
55
  if resp.status_code == 200:
56
  return str(resp.json())
57
  else:
@@ -60,7 +51,6 @@ def analyze_text(text, task="summarization"):
60
  return f"Erro: {str(e)}"
61
 
62
  def explore_dataset(dataset_id, n_samples=3):
63
- """Explora dataset do HF"""
64
  try:
65
  ds = load_dataset(
66
  dataset_id,
@@ -68,10 +58,8 @@ def explore_dataset(dataset_id, n_samples=3):
68
  token=HF_TOKEN,
69
  trust_remote_code=False,
70
  )
71
-
72
  output = f"**Dataset:** {dataset_id}\n\n"
73
  output += f"**Colunas:** {', '.join(ds.column_names)}\n\n"
74
-
75
  for i, sample in enumerate(ds.to_list()[:int(n_samples)]):
76
  output += f"--- Amostra {i+1} ---\n"
77
  for key, val in sample.items():
@@ -79,12 +67,10 @@ def explore_dataset(dataset_id, n_samples=3):
79
  val = val[:300] + "..."
80
  output += f"{key}: {val}\n"
81
  output += "\n"
82
-
83
  return output
84
  except Exception as e:
85
  return f"Erro: {str(e)}"
86
 
87
- # Interface Gradio
88
  with gr.Blocks(title="LEX - Assistente Jurídico", theme=gr.themes.Soft()) as demo:
89
  gr.Markdown("# ⚖️ LEX - Assistente Jurídico MCP")
90
  gr.Markdown("Powered by Hugging Face Hub + FastMCP")
@@ -98,7 +84,6 @@ with gr.Blocks(title="LEX - Assistente Jurídico", theme=gr.themes.Soft()) as de
98
  model_btn = gr.Button("Buscar", variant="primary")
99
  with gr.Column():
100
  model_output = gr.Textbox(label="Resultados", lines=15, interactive=False)
101
-
102
  model_btn.click(fn=search_models, inputs=[model_query, model_lang, model_limit], outputs=model_output)
103
 
104
  with gr.Tab("📝 Analisar Texto"):
@@ -113,22 +98,23 @@ with gr.Blocks(title="LEX - Assistente Jurídico", theme=gr.themes.Soft()) as de
113
  analyze_btn = gr.Button("Analisar", variant="primary")
114
  with gr.Column():
115
  analyze_output = gr.Textbox(label="Resultado", lines=10, interactive=False)
116
-
117
  analyze_btn.click(fn=analyze_text, inputs=[text_input, task_type], outputs=analyze_output)
118
 
119
  with gr.Tab("📊 Explorar Dataset"):
120
  with gr.Row():
121
  with gr.Column():
122
- dataset_input = gr.Textbox(
123
- label="Dataset ID",
124
- value="joelniklaus/brazilian_court_decisions"
125
- )
126
  sample_count = gr.Slider(1, 10, value=3, step=1, label="Amostras")
127
  dataset_btn = gr.Button("Explorar", variant="primary")
128
  with gr.Column():
129
  dataset_output = gr.Textbox(label="Dataset Info", lines=15, interactive=False)
130
-
131
  dataset_btn.click(fn=explore_dataset, inputs=[dataset_input, sample_count], outputs=dataset_output)
132
 
133
  if __name__ == "__main__":
134
- demo.launch()
 
 
 
 
 
 
 
2
  import gradio as gr
3
  import os
4
  import httpx
5
+ from huggingface_hub import list_models
6
  from datasets import load_dataset
7
 
8
  HF_TOKEN = os.getenv("HF_TOKEN")
9
 
10
  def search_models(query="legal", language="pt", limit=5):
 
11
  try:
12
  results = list(
13
  list_models(
 
19
  token=HF_TOKEN,
20
  )
21
  )
 
22
  output = ""
23
  for m in results[:int(limit)]:
24
  output += f"**{m.modelId}**\n"
25
  output += f"- Task: {m.pipeline_tag}\n"
26
  output += f"- Downloads: {m.downloads:,}\n"
27
  output += f"- URL: https://huggingface.co/{m.modelId}\n\n"
 
28
  return output if output else "Nenhum modelo encontrado."
29
  except Exception as e:
30
  return f"Erro: {str(e)}"
31
 
32
  def analyze_text(text, task="summarization"):
 
33
  try:
34
  models = {
35
  "summarization": "facebook/bart-large-cnn",
36
  "text-classification": "nlpaueb/legal-bert-base-uncased",
37
  }
 
38
  model_id = models.get(task, "facebook/bart-large-cnn")
39
  url = f"https://api-inference.huggingface.co/models/{model_id}"
40
  headers = {"Content-Type": "application/json"}
 
41
  if HF_TOKEN:
42
+ headers["Authorization"] = f"Bearer {HF_TOKEN}"
 
43
  payload = {"inputs": text[:512]}
 
44
  with httpx.Client(timeout=60.0) as client:
45
  resp = client.post(url, headers=headers, json=payload)
 
46
  if resp.status_code == 200:
47
  return str(resp.json())
48
  else:
 
51
  return f"Erro: {str(e)}"
52
 
53
  def explore_dataset(dataset_id, n_samples=3):
 
54
  try:
55
  ds = load_dataset(
56
  dataset_id,
 
58
  token=HF_TOKEN,
59
  trust_remote_code=False,
60
  )
 
61
  output = f"**Dataset:** {dataset_id}\n\n"
62
  output += f"**Colunas:** {', '.join(ds.column_names)}\n\n"
 
63
  for i, sample in enumerate(ds.to_list()[:int(n_samples)]):
64
  output += f"--- Amostra {i+1} ---\n"
65
  for key, val in sample.items():
 
67
  val = val[:300] + "..."
68
  output += f"{key}: {val}\n"
69
  output += "\n"
 
70
  return output
71
  except Exception as e:
72
  return f"Erro: {str(e)}"
73
 
 
74
  with gr.Blocks(title="LEX - Assistente Jurídico", theme=gr.themes.Soft()) as demo:
75
  gr.Markdown("# ⚖️ LEX - Assistente Jurídico MCP")
76
  gr.Markdown("Powered by Hugging Face Hub + FastMCP")
 
84
  model_btn = gr.Button("Buscar", variant="primary")
85
  with gr.Column():
86
  model_output = gr.Textbox(label="Resultados", lines=15, interactive=False)
 
87
  model_btn.click(fn=search_models, inputs=[model_query, model_lang, model_limit], outputs=model_output)
88
 
89
  with gr.Tab("📝 Analisar Texto"):
 
98
  analyze_btn = gr.Button("Analisar", variant="primary")
99
  with gr.Column():
100
  analyze_output = gr.Textbox(label="Resultado", lines=10, interactive=False)
 
101
  analyze_btn.click(fn=analyze_text, inputs=[text_input, task_type], outputs=analyze_output)
102
 
103
  with gr.Tab("📊 Explorar Dataset"):
104
  with gr.Row():
105
  with gr.Column():
106
+ dataset_input = gr.Textbox(label="Dataset ID", value="joelniklaus/brazilian_court_decisions")
 
 
 
107
  sample_count = gr.Slider(1, 10, value=3, step=1, label="Amostras")
108
  dataset_btn = gr.Button("Explorar", variant="primary")
109
  with gr.Column():
110
  dataset_output = gr.Textbox(label="Dataset Info", lines=15, interactive=False)
 
111
  dataset_btn.click(fn=explore_dataset, inputs=[dataset_input, sample_count], outputs=dataset_output)
112
 
113
  if __name__ == "__main__":
114
+ # ✅ CORREÇÃO: Configuração correta para HF Spaces
115
+ demo.launch(
116
+ server_name="0.0.0.0",
117
+ server_port=int(os.getenv("PORT", 7860)),
118
+ share=False,
119
+ show_error=True
120
+ )